ppp_generic: Use skb_peek() in ppp_receive_mp_frame().
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
59 #else
60 #define TG3_VLAN_TAG_USED 0
61 #endif
62
63 #define TG3_TSO_SUPPORT 1
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.94"
70 #define DRV_MODULE_RELDATE      "August 14, 2008"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
125
126 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
128
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
131
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134
135 #define TG3_NUM_TEST            6
136
/* Driver banner printed once at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmask of NETIF_MSG_* categories; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
/* PCI vendor/device IDs this driver binds to; the empty entry
 * terminates the table.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
220
/* Names reported for ETHTOOL_GSTRINGS; order must match the u64
 * counters in struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
301
/* Names reported for ETHTOOL_TEST; order must match the self-test
 * result slots (TG3_NUM_TEST entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
312
313 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
314 {
315         writel(val, tp->regs + off);
316 }
317
318 static u32 tg3_read32(struct tg3 *tp, u32 off)
319 {
320         return (readl(tp->regs + off));
321 }
322
323 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
324 {
325         writel(val, tp->aperegs + off);
326 }
327
328 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
329 {
330         return (readl(tp->aperegs + off));
331 }
332
/* Write a chip register through the PCI config-space indirect window:
 * program the window base to @off, then write @val through the data
 * register.  indirect_lock serializes use of the shared window pair.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
342
/* MMIO write followed by a read of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
348
/* Read a chip register through the PCI config-space indirect window
 * (counterpart of tg3_write_indirect_reg32); indirect_lock serializes
 * use of the shared window pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
360
/* Write a mailbox register via PCI config space.  Two mailboxes have
 * dedicated config-space aliases and are written directly without the
 * lock; everything else goes through the indirect window, where the
 * mailbox block sits at offset 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
390
/* Read a mailbox register via the PCI config-space indirect window;
 * the mailbox block sits at offset 0x5600 in the indirect space.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
402
403 /* usec_wait specifies the wait time in usec when writing to certain registers
404  * where it is unsafe to read back the register without some delay.
405  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
406  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
407  */
/* Register write with an optional post-write delay of @usec_wait usec.
 * On chips needing the PCIX/ICH workarounds the non-posted tp->write32
 * path is used; otherwise a posted MMIO write is flushed by a readback.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
427
/* Mailbox write, flushed with a readback unless the chip either
 * requires the write-reorder workaround or uses the ICH workaround
 * (where a readback would be wrong/unnecessary).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
435
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips with write-reorder issues need a readback
 * to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
445
446 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
447 {
448         return (readl(tp->regs + off + GRCMBOX_BASE));
449 }
450
451 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
452 {
453         writel(val, tp->regs + off + GRCMBOX_BASE);
454 }
455
456 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
457 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
458 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
459 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
460 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
461
462 #define tw32(reg,val)           tp->write32(tp, reg, val)
463 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
464 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
465 #define tr32(reg)               tp->read32(tp, reg)
466
/* Write a word into NIC on-board SRAM at @off through the memory
 * window.  On 5906 the stats-block region is not writable and is
 * silently skipped.  Depending on chip flags the window is driven via
 * PCI config space or via MMIO; the base is always restored to zero
 * afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
491
/* Read a word of NIC on-board SRAM at @off into *@val through the
 * memory window (counterpart of tg3_write_mem).  On 5906 the
 * stats-block region is not readable; *val is forced to 0 instead.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
518
519 static void tg3_ape_lock_init(struct tg3 *tp)
520 {
521         int i;
522
523         /* Make sure the driver hasn't any stale locks. */
524         for (i = 0; i < 8; i++)
525                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
526                                 APE_LOCK_GRANT_DRIVER);
527 }
528
529 static int tg3_ape_lock(struct tg3 *tp, int locknum)
530 {
531         int i, off;
532         int ret = 0;
533         u32 status;
534
535         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536                 return 0;
537
538         switch (locknum) {
539                 case TG3_APE_LOCK_GRC:
540                 case TG3_APE_LOCK_MEM:
541                         break;
542                 default:
543                         return -EINVAL;
544         }
545
546         off = 4 * locknum;
547
548         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
549
550         /* Wait for up to 1 millisecond to acquire lock. */
551         for (i = 0; i < 100; i++) {
552                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
553                 if (status == APE_LOCK_GRANT_DRIVER)
554                         break;
555                 udelay(10);
556         }
557
558         if (status != APE_LOCK_GRANT_DRIVER) {
559                 /* Revoke the lock request. */
560                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
561                                 APE_LOCK_GRANT_DRIVER);
562
563                 ret = -EBUSY;
564         }
565
566         return ret;
567 }
568
569 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
570 {
571         int off;
572
573         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
574                 return;
575
576         switch (locknum) {
577                 case TG3_APE_LOCK_GRC:
578                 case TG3_APE_LOCK_MEM:
579                         break;
580                 default:
581                         return;
582         }
583
584         off = 4 * locknum;
585         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
586 }
587
/* Mask the PCI interrupt in MISC_HOST_CTRL, then write 1 to the
 * interrupt mailbox to disable further interrupt delivery.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
594
/* Force an interrupt if status work may be pending: without tagged
 * status, set the GRC interrupt bit when the status block shows an
 * update; otherwise kick the coalescing engine with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
604
/* Re-enable chip interrupts: clear irq_sync (wmb() makes the store
 * visible before the unmask), unmask the PCI interrupt, and write the
 * last seen status tag to the interrupt mailbox.  1-shot MSI chips
 * need the mailbox written twice; finish by forcing an interrupt if
 * work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
619
620 static inline unsigned int tg3_has_work(struct tg3 *tp)
621 {
622         struct tg3_hw_status *sblk = tp->hw_status;
623         unsigned int work_exists = 0;
624
625         /* check for phy events */
626         if (!(tp->tg3_flags &
627               (TG3_FLAG_USE_LINKCHG_REG |
628                TG3_FLAG_POLL_SERDES))) {
629                 if (sblk->status & SD_STATUS_LINK_CHG)
630                         work_exists = 1;
631         }
632         /* check for RX/TX work to do */
633         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
634             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
635                 work_exists = 1;
636
637         return work_exists;
638 }
639
640 /* tg3_restart_ints
641  *  similar to tg3_enable_ints, but it accurately determines whether there
642  *  is new work pending and can return without flushing the PIO write
643  *  which reenables interrupts
644  */
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Acknowledge the last processed status tag; mmiowb() orders the
	 * mailbox write before any subsequent MMIO from other CPUs.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
660
/* Quiesce the interface: refresh trans_start first so the watchdog
 * does not fire while the queue is stopped, then stop NAPI polling
 * and disable the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
667
/* Restart the interface after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated so the next poll runs,
 * and re-enable chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
679
/* Switch the chip core clock down to its normal rate.  Skipped on
 * CPMU-equipped and 5780-class chips, which manage clocks themselves.
 * The intermediate tw32_wait_f() steps follow the required hardware
 * sequencing before the final clock_ctrl value is written.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	/* Preserve only the CLKRUN bits and low divider field. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
711
712 #define PHY_BUSY_LOOPS  5000
713
/* Read PHY register @reg over MDIO into *@val.  Auto-polling is
 * temporarily disabled around the transaction and restored afterwards.
 * Returns 0 on success or -EBUSY if the MI interface stays busy for
 * PHY_BUSY_LOOPS iterations; *val is 0 on failure.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion; re-read once after the busy
	 * bit drops to pick up the final data.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
762
/* Write @val to PHY register @reg over MDIO.  On 5906, writes to
 * MII_TG3_CTRL/MII_TG3_AUX_CTRL are silently skipped (not present).
 * Auto-polling is temporarily disabled around the transaction.
 * Returns 0 on success or -EBUSY on MI-interface timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: PHY address, register, data, write op. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
811
812 static int tg3_bmcr_reset(struct tg3 *tp)
813 {
814         u32 phy_control;
815         int limit, err;
816
817         /* OK, reset it, and poll the BMCR_RESET bit until it
818          * clears or we time out.
819          */
820         phy_control = BMCR_RESET;
821         err = tg3_writephy(tp, MII_BMCR, phy_control);
822         if (err != 0)
823                 return -EBUSY;
824
825         limit = 5000;
826         while (limit--) {
827                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
828                 if (err != 0)
829                         return -EBUSY;
830
831                 if ((phy_control & BMCR_RESET) == 0) {
832                         udelay(40);
833                         break;
834                 }
835                 udelay(10);
836         }
837         if (limit <= 0)
838                 return -EBUSY;
839
840         return 0;
841 }
842
843 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
844 {
845         struct tg3 *tp = (struct tg3 *)bp->priv;
846         u32 val;
847
848         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
849                 return -EAGAIN;
850
851         if (tg3_readphy(tp, reg, &val))
852                 return -EIO;
853
854         return val;
855 }
856
857 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
858 {
859         struct tg3 *tp = (struct tg3 *)bp->priv;
860
861         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
862                 return -EAGAIN;
863
864         if (tg3_writephy(tp, reg, val))
865                 return -EIO;
866
867         return 0;
868 }
869
/* phylib mii_bus .reset hook: the tg3 bus needs no reset, so this is
 * a required no-op that always reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
874
/* Program the MAC's RGMII-related registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the in-band/out-of-band signalling
 * flags in tp->tg3_flags3.  No-op unless the attached PHY interface
 * is RGMII.
 */
static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	/* Extended RX decode / send-status are only enabled when
	 * standard in-band signalling is disabled.
	 */
	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	/* In-band status only in standard in-band mode. */
	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	/* Clear all RGMII mode bits, then re-enable the RX and/or TX
	 * groups according to the external in-band flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
919
/* Un-pause the mdio bus (if it has been registered), disable MAC
 * auto-polling of the PHY, and reapply the RGMII MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus.mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus.mdio_lock);
	}

	/* Turn off hardware auto-polling of the PHY. */
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}
935
936 static void tg3_mdio_stop(struct tg3 *tp)
937 {
938         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
939                 mutex_lock(&tp->mdio_bus.mdio_lock);
940                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
941                 mutex_unlock(&tp->mdio_bus.mdio_lock);
942         }
943 }
944
/* Register the device's embedded MDIO bus with phylib and configure
 * the detected PHY's interface mode.  Skipped (returns 0) unless
 * TG3_FLG3_USE_PHYLIB is set and the bus is not already registered.
 * Returns the mdiobus_register() error code on failure.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;
	struct mii_bus *mdio_bus = &tp->mdio_bus;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	memset(mdio_bus, 0, sizeof(*mdio_bus));

	/* Bus id is derived from the PCI bus/devfn so it is unique
	 * per device instance.
	 */
	mdio_bus->name     = "tg3 mdio bus";
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	mdio_bus->priv     = tp;
	mdio_bus->dev      = &tp->pdev->dev;
	mdio_bus->read     = &tg3_mdio_read;
	mdio_bus->write    = &tg3_mdio_write;
	mdio_bus->reset    = &tg3_mdio_reset;
	/* Only probe the single PHY at PHY_ADDR. */
	mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	/* Pick the PHY interface mode and in-band flags based on the
	 * discovered PHY ID.
	 */
	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}
1012
1013 static void tg3_mdio_fini(struct tg3 *tp)
1014 {
1015         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1016                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1017                 mdiobus_unregister(&tp->mdio_bus);
1018                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1019         }
1020 }
1021
1022 /* tp->lock is held. */
1023 static inline void tg3_generate_fw_event(struct tg3 *tp)
1024 {
1025         u32 val;
1026
1027         val = tr32(GRC_RX_CPU_EVENT);
1028         val |= GRC_RX_CPU_DRIVER_EVENT;
1029         tw32_f(GRC_RX_CPU_EVENT, val);
1030
1031         tp->last_event_jiffies = jiffies;
1032 }
1033
1034 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1035
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC since the last event)
 * for the firmware to acknowledge the previous driver event, i.e.
 * for GRC_RX_CPU_DRIVER_EVENT to clear.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1062
/* tp->lock is held. */
/* Copy the current MII link state (BMCR/BMSR, advertisement, link
 * partner ability, 1000T control/status, PHY address register) into
 * the NIC firmware mailbox and raise a driver event so management
 * (ASF/UMP) firmware learns of link changes.  Only runs on 5780-class
 * chips with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure the previous event has been consumed. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* 14 bytes of payload follow in the data mailbox. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1110
1111 static void tg3_link_report(struct tg3 *tp)
1112 {
1113         if (!netif_carrier_ok(tp->dev)) {
1114                 if (netif_msg_link(tp))
1115                         printk(KERN_INFO PFX "%s: Link is down.\n",
1116                                tp->dev->name);
1117                 tg3_ump_link_report(tp);
1118         } else if (netif_msg_link(tp)) {
1119                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1120                        tp->dev->name,
1121                        (tp->link_config.active_speed == SPEED_1000 ?
1122                         1000 :
1123                         (tp->link_config.active_speed == SPEED_100 ?
1124                          100 : 10)),
1125                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1126                         "full" : "half"));
1127
1128                 printk(KERN_INFO PFX
1129                        "%s: Flow control is %s for TX and %s for RX.\n",
1130                        tp->dev->name,
1131                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1132                        "on" : "off",
1133                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1134                        "on" : "off");
1135                 tg3_ump_link_report(tp);
1136         }
1137 }
1138
1139 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1140 {
1141         u16 miireg;
1142
1143         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1144                 miireg = ADVERTISE_PAUSE_CAP;
1145         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1146                 miireg = ADVERTISE_PAUSE_ASYM;
1147         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1148                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1149         else
1150                 miireg = 0;
1151
1152         return miireg;
1153 }
1154
1155 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1156 {
1157         u16 miireg;
1158
1159         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1160                 miireg = ADVERTISE_1000XPAUSE;
1161         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1162                 miireg = ADVERTISE_1000XPSE_ASYM;
1163         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1164                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1165         else
1166                 miireg = 0;
1167
1168         return miireg;
1169 }
1170
1171 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1172 {
1173         u8 cap = 0;
1174
1175         if (lcladv & ADVERTISE_PAUSE_CAP) {
1176                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1177                         if (rmtadv & LPA_PAUSE_CAP)
1178                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1179                         else if (rmtadv & LPA_PAUSE_ASYM)
1180                                 cap = TG3_FLOW_CTRL_RX;
1181                 } else {
1182                         if (rmtadv & LPA_PAUSE_CAP)
1183                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1184                 }
1185         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1186                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1187                         cap = TG3_FLOW_CTRL_TX;
1188         }
1189
1190         return cap;
1191 }
1192
1193 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1194 {
1195         u8 cap = 0;
1196
1197         if (lcladv & ADVERTISE_1000XPAUSE) {
1198                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1199                         if (rmtadv & LPA_1000XPAUSE)
1200                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1201                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1202                                 cap = TG3_FLOW_CTRL_RX;
1203                 } else {
1204                         if (rmtadv & LPA_1000XPAUSE)
1205                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1206                 }
1207         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1208                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1209                         cap = TG3_FLOW_CTRL_TX;
1210         }
1211
1212         return cap;
1213 }
1214
/* Apply flow control to the MAC.  When autoneg with pause-autoneg is
 * active, resolve it from the local (@lcladv) and remote (@rmtadv)
 * advertisements; otherwise use the statically configured value.
 * Updates MAC_RX_MODE/MAC_TX_MODE only if they change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib the autoneg state lives in the phy_device. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Avoid a register write if nothing changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1254
/* phylib link-change callback.  Mirrors the PHY's negotiated state
 * (speed, duplex, pause) into the MAC mode, TX length and flow-control
 * registers, then logs a link message if anything user-visible changed.
 * Runs under tp->lock; the report is issued after the lock is dropped.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* MII port mode for 10/100, GMII for gigabit. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our config and
			 * the partner's reported pause capabilities.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the register when the mode actually changes. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* 1000/half needs a larger slot time than the default. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report when link comes up, goes down, or any of speed,
	 * duplex or flow control changed.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1326
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link as the link-change callback, and
 * restrict the advertised features to what the MAC supports.  Returns
 * 0 on success (or if already connected), or the phy_connect() error.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}
1362
/* Resume the PHY: if it was in low-power mode, restore the saved link
 * parameters, then restart the phylib state machine and autoneg.
 * No-op unless the PHY has been connected by tg3_phy_init().
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1384
/* Halt the phylib state machine for the attached PHY; no-op if the
 * PHY was never connected.
 */
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
}
1392
1393 static void tg3_phy_fini(struct tg3 *tp)
1394 {
1395         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1396                 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1397                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1398         }
1399 }
1400
/* Write @val to PHY DSP register @reg: the target address goes into
 * the DSP address register, the data into the DSP read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1406
/* Enable or disable automatic MDI crossover (auto-MDIX) in the PHY.
 * Only applies to 5705+ copper devices.  The 5906 EPHY uses a shadow
 * register behind MII_TG3_EPHY_TEST; other PHYs use the misc shadow
 * of the auxiliary control register (read-modify-write with the WREN
 * bit to commit).
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Expose the shadow register, flip the MDIX bit, then
		 * restore the original test-register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1444
/* Turn on the PHY "wirespeed" feature unless the chip flags forbid it:
 * read-modify-write the auxiliary control register (selector 0x7007 —
 * presumably a shadow-register select; confirm against the PHY data
 * sheet), setting bits 15 and 4.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1457
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * value cached in tp->phy_otp.  Each bit field of the OTP word is
 * extracted and written to its DSP register.  No-op when no OTP value
 * was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1500
1501 static int tg3_wait_macro_done(struct tg3 *tp)
1502 {
1503         int limit = 100;
1504
1505         while (limit--) {
1506                 u32 tmp32;
1507
1508                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1509                         if ((tmp32 & 0x1000) == 0)
1510                                 break;
1511                 }
1512         }
1513         if (limit <= 0)
1514                 return -EBUSY;
1515
1516         return 0;
1517 }
1518
/* Write a 6-word test pattern into each of the 4 PHY DSP channels and
 * read it back for verification.  On a macro timeout, *resetp is set
 * so the caller retries after a PHY reset; on a data mismatch, three
 * recovery DSP writes are issued instead.  Returns 0 when all channels
 * verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Words come back as low/high pairs; compare against
		 * the pattern that was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1584
/* Zero the 6-word test pattern in each of the 4 PHY DSP channels and
 * wait for the macro operation to complete.  Returns -EBUSY on a
 * macro timeout, 0 otherwise.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1604
/* Workaround reset sequence for 5703/5704/5705 PHYs: repeatedly reset
 * the PHY and run the DSP test-pattern write/verify until it passes
 * (up to 10 tries), then clear the channel patterns and restore the
 * transmitter/interrupt and master-mode settings.
 *
 * NOTE(review): if every retry iteration bails out via `continue`
 * before MII_TG3_CTRL is read, phy9_orig is written back uninitialized
 * below — looks like a latent bug; confirm against hardware behavior.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master-mode setting. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1680
/* Fully reset the tigon3 PHY and re-apply all chip-specific
 * workarounds (DSP fixups, jumbo-frame bits, wirespeed, MDI-X).
 * If the interface is up with carrier, carrier is dropped and the
 * link change is reported before the reset.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 EPHY out of IDDQ (low-power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR link status is latched; read it twice to get the
	 * current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavyweight DSP-verified reset.  */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX) temporarily clear the CPMU 10MB-RX-only
	 * restriction around the reset; it is restored below.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the CPMU control value saved above.  */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Take the MAC clock out of the 12.5MHz low-power
		 * rate if it was left there.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Per-chip PHY DSP workarounds, keyed off bug flags set at
	 * probe time.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1826
/* Program the GRC local-control GPIOs that switch the board between
 * main and auxiliary (Vaux) power.  On dual-port devices (5704/5714)
 * the GPIOs are shared with the peer port, so the peer's WOL/ASF
 * state decides whether aux power must stay on, and a peer that has
 * completed init owns the GPIOs.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* GPIO-based power switching only applies to NIC designs.  */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Either port needs WOL/ASF: keep aux power available.  */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* A fully-initialized peer owns the shared
			 * GPIOs; do not touch them from this port.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power.  */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 (OUTPUT1 on, off, on again).  */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1937
1938 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1939 {
1940         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1941                 return 1;
1942         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1943                 if (speed != SPEED_10)
1944                         return 1;
1945         } else if (speed == SPEED_10)
1946                 return 1;
1947
1948         return 0;
1949 }
1950
1951 static int tg3_setup_phy(struct tg3 *, int);
1952
1953 #define RESET_KIND_SHUTDOWN     0
1954 #define RESET_KIND_INIT         1
1955 #define RESET_KIND_SUSPEND      2
1956
1957 static void tg3_write_sig_post_reset(struct tg3 *, int);
1958 static int tg3_halt_cpu(struct tg3 *, u32);
1959 static int tg3_nvram_lock(struct tg3 *);
1960 static void tg3_nvram_unlock(struct tg3 *);
1961
/* Put the PHY into its lowest-power state ahead of suspend/shutdown,
 * honoring the per-chip quirks that forbid a full MII power-down.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		/* SERDES PHYs: nothing further to power down via MII.  */
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* The 5906 EPHY sleeps via IDDQ mode, not BMCR_PDOWN.  */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		/* Force the LEDs off while the PHY is asleep.  */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Drop the MAC clock to the 12.5MHz low-power rate.  */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	/* Finally set the MII power-down bit.  */
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2009
/* Transition the device to the requested PCI power state.
 *
 * PCI_D0 simply wakes the chip and switches out of Vaux.  For
 * D1/D2/D3hot the function: masks PCI interrupts, saves the link
 * configuration and drops to a low-power link mode, arms WOL
 * (magic-packet) reception when enabled, gates/slows core clocks
 * per chip family, powers down the PHY when nothing needs to wake
 * us, switches aux power, posts the shutdown signature, and finally
 * writes the new PCI power state.
 *
 * Returns 0 on success or -EINVAL for an unknown state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}
	/* Mask PCI interrupts while the chip is put to sleep.  */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings in link_config.orig_* and
	 * drop the link to a low-power mode for the sleep period.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 advertising;

			phydev = tp->mdio_bus.phy_map[PHY_ADDR];

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			/* WOL/ASF still needs a usable link; allow
			 * 100Mb modes only when WOL_SPEED_100MB is set.
			 */
			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);
		}
	} else {
		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll the firmware status mailbox for up to ~200ms
		 * for the magic completion value.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep the MAC receiver running in the right port mode
		 * so a magic packet can be detected while asleep.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (pci_pme_capable(tp->pdev, state) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate or slow the core clocks where the chip family allows.  */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply in two steps, 40us apart.  */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* The PHY may only be powered down when nothing (WOL, ASF,
	 * APE) needs it to stay awake.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	/* Post the RESET_KIND_SHUTDOWN signature.  */
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
2259
2260 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2261 {
2262         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2263         case MII_TG3_AUX_STAT_10HALF:
2264                 *speed = SPEED_10;
2265                 *duplex = DUPLEX_HALF;
2266                 break;
2267
2268         case MII_TG3_AUX_STAT_10FULL:
2269                 *speed = SPEED_10;
2270                 *duplex = DUPLEX_FULL;
2271                 break;
2272
2273         case MII_TG3_AUX_STAT_100HALF:
2274                 *speed = SPEED_100;
2275                 *duplex = DUPLEX_HALF;
2276                 break;
2277
2278         case MII_TG3_AUX_STAT_100FULL:
2279                 *speed = SPEED_100;
2280                 *duplex = DUPLEX_FULL;
2281                 break;
2282
2283         case MII_TG3_AUX_STAT_1000HALF:
2284                 *speed = SPEED_1000;
2285                 *duplex = DUPLEX_HALF;
2286                 break;
2287
2288         case MII_TG3_AUX_STAT_1000FULL:
2289                 *speed = SPEED_1000;
2290                 *duplex = DUPLEX_FULL;
2291                 break;
2292
2293         default:
2294                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2295                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2296                                  SPEED_10;
2297                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2298                                   DUPLEX_HALF;
2299                         break;
2300                 }
2301                 *speed = SPEED_INVALID;
2302                 *duplex = DUPLEX_INVALID;
2303                 break;
2304         }
2305 }
2306
2307 static void tg3_phy_copper_begin(struct tg3 *tp)
2308 {
2309         u32 new_adv;
2310         int i;
2311
2312         if (tp->link_config.phy_is_low_power) {
2313                 /* Entering low power mode.  Disable gigabit and
2314                  * 100baseT advertisements.
2315                  */
2316                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2317
2318                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2319                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2320                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2321                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2322
2323                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2324         } else if (tp->link_config.speed == SPEED_INVALID) {
2325                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2326                         tp->link_config.advertising &=
2327                                 ~(ADVERTISED_1000baseT_Half |
2328                                   ADVERTISED_1000baseT_Full);
2329
2330                 new_adv = ADVERTISE_CSMA;
2331                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2332                         new_adv |= ADVERTISE_10HALF;
2333                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2334                         new_adv |= ADVERTISE_10FULL;
2335                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2336                         new_adv |= ADVERTISE_100HALF;
2337                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2338                         new_adv |= ADVERTISE_100FULL;
2339
2340                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2341
2342                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2343
2344                 if (tp->link_config.advertising &
2345                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2346                         new_adv = 0;
2347                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2348                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2350                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2351                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2352                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2353                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2354                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2355                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2356                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2357                 } else {
2358                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2359                 }
2360         } else {
2361                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2362                 new_adv |= ADVERTISE_CSMA;
2363
2364                 /* Asking for a specific link mode. */
2365                 if (tp->link_config.speed == SPEED_1000) {
2366                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2367
2368                         if (tp->link_config.duplex == DUPLEX_FULL)
2369                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2370                         else
2371                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2372                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2373                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2374                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2375                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2376                 } else {
2377                         if (tp->link_config.speed == SPEED_100) {
2378                                 if (tp->link_config.duplex == DUPLEX_FULL)
2379                                         new_adv |= ADVERTISE_100FULL;
2380                                 else
2381                                         new_adv |= ADVERTISE_100HALF;
2382                         } else {
2383                                 if (tp->link_config.duplex == DUPLEX_FULL)
2384                                         new_adv |= ADVERTISE_10FULL;
2385                                 else
2386                                         new_adv |= ADVERTISE_10HALF;
2387                         }
2388                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2389
2390                         new_adv = 0;
2391                 }
2392
2393                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2394         }
2395
2396         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2397             tp->link_config.speed != SPEED_INVALID) {
2398                 u32 bmcr, orig_bmcr;
2399
2400                 tp->link_config.active_speed = tp->link_config.speed;
2401                 tp->link_config.active_duplex = tp->link_config.duplex;
2402
2403                 bmcr = 0;
2404                 switch (tp->link_config.speed) {
2405                 default:
2406                 case SPEED_10:
2407                         break;
2408
2409                 case SPEED_100:
2410                         bmcr |= BMCR_SPEED100;
2411                         break;
2412
2413                 case SPEED_1000:
2414                         bmcr |= TG3_BMCR_SPEED1000;
2415                         break;
2416                 }
2417
2418                 if (tp->link_config.duplex == DUPLEX_FULL)
2419                         bmcr |= BMCR_FULLDPLX;
2420
2421                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2422                     (bmcr != orig_bmcr)) {
2423                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2424                         for (i = 0; i < 1500; i++) {
2425                                 u32 tmp;
2426
2427                                 udelay(10);
2428                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2429                                     tg3_readphy(tp, MII_BMSR, &tmp))
2430                                         continue;
2431                                 if (!(tmp & BMSR_LSTATUS)) {
2432                                         udelay(40);
2433                                         break;
2434                                 }
2435                         }
2436                         tg3_writephy(tp, MII_BMCR, bmcr);
2437                         udelay(40);
2438                 }
2439         } else {
2440                 tg3_writephy(tp, MII_BMCR,
2441                              BMCR_ANENABLE | BMCR_ANRESTART);
2442         }
2443 }
2444
2445 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2446 {
2447         int err;
2448
2449         /* Turn off tap power management. */
2450         /* Set Extended packet length bit */
2451         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2452
2453         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2454         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2455
2456         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2457         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2458
2459         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2460         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2461
2462         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2463         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2464
2465         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2466         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2467
2468         udelay(40);
2469
2470         return err;
2471 }
2472
2473 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2474 {
2475         u32 adv_reg, all_mask = 0;
2476
2477         if (mask & ADVERTISED_10baseT_Half)
2478                 all_mask |= ADVERTISE_10HALF;
2479         if (mask & ADVERTISED_10baseT_Full)
2480                 all_mask |= ADVERTISE_10FULL;
2481         if (mask & ADVERTISED_100baseT_Half)
2482                 all_mask |= ADVERTISE_100HALF;
2483         if (mask & ADVERTISED_100baseT_Full)
2484                 all_mask |= ADVERTISE_100FULL;
2485
2486         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2487                 return 0;
2488
2489         if ((adv_reg & all_mask) != all_mask)
2490                 return 0;
2491         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2492                 u32 tg3_ctrl;
2493
2494                 all_mask = 0;
2495                 if (mask & ADVERTISED_1000baseT_Half)
2496                         all_mask |= ADVERTISE_1000HALF;
2497                 if (mask & ADVERTISED_1000baseT_Full)
2498                         all_mask |= ADVERTISE_1000FULL;
2499
2500                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2501                         return 0;
2502
2503                 if ((tg3_ctrl & all_mask) != all_mask)
2504                         return 0;
2505         }
2506         return 1;
2507 }
2508
2509 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2510 {
2511         u32 curadv, reqadv;
2512
2513         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2514                 return 1;
2515
2516         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2517         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2518
2519         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2520                 if (curadv != reqadv)
2521                         return 0;
2522
2523                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2524                         tg3_readphy(tp, MII_LPA, rmtadv);
2525         } else {
2526                 /* Reprogram the advertisement register, even if it
2527                  * does not affect the current link.  If the link
2528                  * gets renegotiated in the future, we can save an
2529                  * additional renegotiation cycle by advertising
2530                  * it correctly in the first place.
2531                  */
2532                 if (curadv != reqadv) {
2533                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2534                                      ADVERTISE_PAUSE_ASYM);
2535                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2536                 }
2537         }
2538
2539         return 1;
2540 }
2541
/* Bring up (or re-check) the link on a copper PHY.
 *
 * Clears pending MAC link events, optionally resets the PHY, polls
 * BMSR for link, derives speed/duplex from the aux status register,
 * validates the result against the configured link parameters, and
 * finally programs MAC_MODE and the carrier state to match.
 *
 * @force_reset: non-zero to unconditionally reset the PHY first.
 * Returns 0, or a negative error from the 5401 DSP init path.
 *
 * NOTE(review): BMSR is read twice throughout this function —
 * the MII link-status bit is latched, so the second read is
 * presumed to reflect the current state; confirm against the
 * PHY datasheet.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events, then clear any latched link-state bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we drive the MII bus directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	/* BCM5401 needs its DSP coefficients reloaded whenever the
	 * link is down (or before first init).
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms for link to return. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit speed may need a full
			 * reset plus DSP reload if link never came back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt (or all, per config). */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	/* Pick the LED mode for 5700/5701. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Capacitively-coupled boards: make sure bit 10 of AUX_CTRL
	 * (page 0x4007) is set; if we had to set it, skip the link
	 * poll this pass and renegotiate.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link up (~4ms max). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it into
		 * speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR, retrying until it returns a sane value
		 * (0x7fff indicates a bad read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		/* The link only counts as "up" if the negotiated (or
		 * forced) parameters agree with what was configured.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	/* No acceptable link (or we're in low-power mode): restart
	 * negotiation and take whatever link appears.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode/duplex to match the link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X/high-speed PCI: tell the firmware
	 * (via its mailbox) after clearing the latched status bits.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate link state to the net stack and log transitions. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2800
/* State kept across invocations of the software fibre autoneg
 * state machine (tg3_fiber_aneg_smachine).
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control/result bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per smachine call;
	 * link_time records when the current state was entered.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last stable rx config word */
	int ability_match_count;	/* consecutive identical rx configs */

	/* Flags set from the received config word each tick. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words sent / received (ANEG_CFG_*) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* tg3_fiber_aneg_smachine() return codes. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
2864
/* Run one tick of the software autonegotiation state machine for
 * fibre (1000base-X style) links.
 *
 * Samples the received config word from the MAC, updates the match
 * bookkeeping in @ap, then advances @ap->state.  Returns ANEG_OK to
 * keep ticking, ANEG_TIMER_ENAB when the caller should keep polling
 * on a timer, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First call: zero all bookkeeping. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and track whether the partner
	 * has sent the same non-idle word more than once (ability_match),
	 * whether it is acknowledging (ack_match), or idling (idle_match).
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Begin transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time has elapsed. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero config word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Config word changed; start over. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): 0x0008 has no ANEG_CFG_* name; it appears
		 * to be the received toggle bit — confirm against the
		 * 1000base-X config word layout.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange unsupported. */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3118
/* Run the software 1000BASE-X autonegotiation state machine.
 *
 * Puts the MAC into GMII port mode with SEND_CONFIGS asserted, then
 * steps tg3_fiber_aneg_smachine() once per microsecond (up to ~195 ms)
 * until it reports ANEG_DONE or ANEG_FAILED, and finally stops sending
 * config code words.
 *
 * @tp:      device state
 * @txflags: out - the ANEG_CFG_* word we transmitted
 * @rxflags: out - the MR_* result flags from the state machine
 *
 * Returns 1 when autoneg finished (ANEG_DONE) and any of the
 * MR_AN_COMPLETE / MR_LINK_OK / MR_LP_ADV_FULL_DUPLEX flags are set,
 * 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any stale autoneg advertisement word first. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Poll the state machine for at most ~195 ms (1 us per tick). */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop transmitting config code words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
3163
/* Bring the external BCM8002 SerDes PHY out of reset and program its
 * PLL / comdet configuration through raw PHY register writes.
 *
 * NOTE(review): the register numbers and values below are Broadcom
 * magic inherited from the reference driver; there is no public
 * datasheet to cross-check them against.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (busy-wait ~5 ms). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (busy-wait ~150 ms). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3213
/* Link setup for SerDes boards whose autonegotiation is driven by the
 * on-chip SG_DIG hardware block (SERDES_AN_TIMEOUT_5704S suggests the
 * 5704S family).
 *
 * Programs SG_DIG_CTRL according to link_config, then inspects
 * SG_DIG_STATUS / MAC_STATUS to decide link state, falling back to
 * parallel detection when the partner never completes autoneg.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes performed below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down any HW autoneg still running,
		 * then report link up if PCS is synced.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Parallel-detected link with PCS sync and no incoming
		 * config words: keep the link up while the serdes
		 * counter runs down instead of restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the new autoneg config. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated pause bits into the
			 * generic 1000X advertisement format so
			 * tg3_setup_flow_control() can resolve them.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable HW autoneg
				 * and try parallel detection instead.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* Neither PCS sync nor signal detect: rearm the autoneg
		 * timeout and clear parallel-detect state.
		 */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3355
/* Link setup for SerDes boards where autonegotiation is performed by
 * the software state machine (fiber_autoneg) instead of the SG_DIG
 * hardware block.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there can be no link at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the tx config word / MR result flags into
			 * 1000X advertisement bits for flow control.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop arriving
		 * (at most 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but PCS is synced and no config code
		 * words are arriving: treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS once to settle the link partner. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3417
/* Main link setup for TBI/SerDes (fiber) boards; called from
 * tg3_setup_phy().  Dispatches to the hardware (SG_DIG) or software
 * autoneg path, then reconciles carrier state and LEDs, reporting any
 * link change.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the current link parameters so we can detect a
	 * change even when carrier state does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, and the MAC
	 * still shows a clean synced link - just ack status and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated while clearing the link-change
	 * bit we may have provoked above.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack outstanding sync/config/link-state events (max 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no PCS sync: pulse SEND_CONFIGS
		 * to prod the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000 full duplex when up; drive the
	 * link LED accordingly.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or a flow-control/speed/duplex
	 * change on an otherwise stable link.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3525
/* Link setup for SerDes boards driven through an MII-style PHY
 * interface (SERDES_AN_TIMEOUT_5714S suggests the 5714S family);
 * called from tg3_setup_phy().
 *
 * Three cases: parallel detection in progress (leave the PHY alone and
 * just check link at the end), MII autoneg (program MII_ADVERTISE and
 * restart AN if anything changed), and forced mode (rewrite BMCR,
 * dropping the link first if carrier is up).
 *
 * Returns the OR of the tg3_readphy() results (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status events before reconfiguring. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice; link status is latched-low in MII PHYs, so
	 * the second read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: derive link state from the MAC TX status
		 * register instead of BMSR.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000X advertisement from link_config,
		 * preserving any unrelated bits already set.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): restart
			 * autoneg, arm the timeout, and return early.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart AN
				 * so the partner drops the link before we
				 * force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched link status after the change. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from what both ends advertise;
			 * no common 1000X ability means no usable link.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	/* local_adv/remote_adv are only initialized on the LSTATUS
	 * path, which is the only way current_link_up can be 1 here.
	 */
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3695
/* Parallel-detection check for MII SerDes boards.
 * NOTE(review): presumably invoked periodically from the driver's
 * link timer - confirm at the call sites.
 *
 * If autoneg has timed out (serdes_counter reached zero) and the PHY
 * shows signal detect without incoming config code words, force the
 * link up at 1000/full and mark it PARALLEL_DETECT.  Conversely, once
 * a parallel-detected link starts receiving config code words again,
 * re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice; NOTE(review): presumably the first
			 * read clears latched bits - confirm against the
			 * PHY documentation.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3753
/* Top-level link (re)configuration: dispatch to the fiber, MII-SerDes,
 * or copper setup routine, then apply link-dependent MAC settings
 * (clock prescaler on 5784 A0/A1, TX IPG/slot time, statistics
 * coalescing, ASPM power-management threshold).
 *
 * Returns the error code from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Rescale the GRC prescaler to match the current MAC
		 * core clock reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half uses an extended slot time (0xff); everything else
	 * uses the standard value (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: disable statistics coalescing while the link
	 * is down.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: use the saved pwrmgmt_thresh value for the
		 * L1 threshold field; link up: set the field to all ones.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3816
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Sanity: recovery only makes sense if the write-reorder
	 * workaround (indirect mailbox writes) is not already active.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3837
3838 static inline u32 tg3_tx_avail(struct tg3 *tp)
3839 {
3840         smp_mb();
3841         return (tp->tx_pending -
3842                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3843 }
3844
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reaps completed TX descriptors between the software consumer index
 * and the hardware's consumer index, unmapping and freeing each skb,
 * then wakes the queue if enough space has opened up.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a completed slot means the hardware
		 * acked a descriptor we never posted - likely MMIO
		 * reordering; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Skip over the frag descriptors.  They must be empty
		 * and must not run past hw_idx, or the ring state is
		 * inconsistent.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check availability under netif_tx_lock to close the race
	 * with a concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3903
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve the target ring (std or jumbo), the descriptor and the
	 * bookkeeping slot, plus the per-ring buffer size.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the pci_map_single() result is not checked with
	 * pci_dma_mapping_error(); a failed mapping would be handed to the
	 * chip as-is.  Worth confirming against the platform's DMA
	 * mapping semantics.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	/* The caller's old slot (if any) no longer owns an skb. */
	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the DMA address needs updating; see header comment. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3975
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	/* Resolve source/destination descriptor and bookkeeping slots in
	 * whichever ring (std or jumbo) the buffer belongs to.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer skb ownership and the DMA mapping from src to dest... */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* ...and leave the source slot empty. */
	src_map->skb = NULL;
}
4016
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack through the VLAN acceleration
 * path registered in tp->vlgrp.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4023
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which posting ring (std or
		 * jumbo) and which slot this buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		/* Remember which rings need their producer mailbox kicked
		 * once the loop is done.
		 */
		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a new skb and recycle
			 * the original DMA buffer back to the chip.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flagged
		 * the frame as TCP/UDP and the computed sum is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let too many std buffers accumulate unposted; kick
		 * the producer mailbox periodically within the loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
4203
/* Do one round of NAPI work: handle link-change events, reap TX
 * completions, then receive packets within the remaining budget.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before acting on it so
			 * a subsequent event is not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns the PHY; just ack the MAC
				 * status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* TX recovery pending means tg3_tx() hit a bogus
		 * completion; bail out and let tg3_poll() schedule the
		 * reset task.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4245
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted or no work remains; in the latter case polling is
 * completed and chip interrupts are restarted.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4286
/* Keep the IRQ handler from doing further work: set irq_sync (the
 * handlers check it via tg3_irq_sync() and bail out early), then wait
 * for any handler instance already executing to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4296
/* Non-zero while interrupt processing is quiesced (see
 * tg3_irq_quiesce()); IRQ handlers use this to bail out early.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4301
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* _bh variant: also keeps softirq context off this CPU. */
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4313
/* Counterpart to tg3_full_lock().  Note irq_sync is not cleared here;
 * it is reset elsewhere (e.g. tg3_restart_hw()).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4318
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Don't schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4335
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Don't schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4360
/* Legacy INTx interrupt handler (non-tagged status blocks).  The line
 * may be shared with other devices, so the handler must determine
 * whether the interrupt is really ours before claiming it.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4409
/* INTx interrupt handler for chips using tagged status blocks: new
 * work is detected by comparing the status tag against the last tag
 * we processed, instead of the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4457
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip's INTA line is active, and mask further interrupts so the
	 * test only fires once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4472
4473 static int tg3_init_hw(struct tg3 *, int);
4474 static int tg3_halt(struct tg3 *, int, int);
4475
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the interface.
		 * The lock is dropped around dev_close() and re-taken
		 * before returning, per the annotations above.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4499
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler by hand so netconsole
 * and friends can make progress without a hardware interrupt.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4508
/* Workqueue handler that fully halts and re-initializes the chip.
 * Scheduled from tg3_tx_timeout() and from the TX-recovery path in
 * tg3_poll().
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was closed in the meantime. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Re-take the full lock, this time quiescing the IRQ handler. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* If tg3_tx() flagged bogus completions, switch the TX/RX mailbox
	 * accessors and latch the write-reorder workaround flag before
	 * bringing the chip back up.  See tg3_tx_recover().
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4556
/* Log a brief dump of MAC and DMA status registers; used when a TX
 * timeout is reported (see tg3_tx_timeout()).
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4564
/* net_device TX watchdog callback: optionally log device state, then
 * defer a full chip reset to the workqueue (tg3_reset_task()).
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
4577
4578 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4579 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4580 {
4581         u32 base = (u32) mapping & 0xffffffff;
4582
4583         return ((base > 0xffffdcc0) &&
4584                 (base + len + 8 < base));
4585 }
4586
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips with the 40-bit DMA erratum need the check. */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	/* Without 64-bit/highmem addressing a mapping cannot exceed
	 * 40 bits, so never report an overflow.
	 */
	return 0;
#endif
}
4599
4600 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4601
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	/* Re-linearize the skb into a fresh copy.  On 5701, give the copy
	 * extra headroom so its data pointer can start 4-byte aligned.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Single descriptor covers the whole linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		/* First slot takes ownership of the (possibly NULL)
		 * replacement skb; the remaining slots the original
		 * occupied are cleared.
		 */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* The original skb and its mapping are done with either way. */
	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4664
4665 static void tg3_set_txd(struct tg3 *tp, int entry,
4666                         dma_addr_t mapping, int len, u32 flags,
4667                         u32 mss_and_is_end)
4668 {
4669         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4670         int is_end = (mss_and_is_end & 0x1);
4671         u32 mss = (mss_and_is_end >> 1);
4672         u32 vlan_tag = 0;
4673
4674         if (is_end)
4675                 flags |= TXD_FLAG_END;
4676         if (flags & TXD_FLAG_VLAN) {
4677                 vlan_tag = flags >> 16;
4678                 flags &= 0xffff;
4679         }
4680         vlan_tag |= (mss << TXD_MSS_SHIFT);
4681
4682         txd->addr_hi = ((u64) mapping >> 32);
4683         txd->addr_lo = ((u64) mapping & 0xffffffff);
4684         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4685         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4686 }
4687
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Returns NETDEV_TX_OK (the skb is consumed, possibly dropped on
 * mapping/expand failure) or NETDEV_TX_BUSY when the ring is full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* The headers are rewritten below; make sure we own them. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                /* HW_TSO_2 parts take the L3+L4 header length encoded
                 * in bits 9+ of the mss descriptor field.
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Seed tot_len with the per-segment length; the
                         * hardware fixes up the IP checksum per segment.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Hardware computes the TCP checksum for each segment. */
                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        tp->tx_buffers[entry].skb = skb;

        /* First descriptor covers the linear header area; it is also
         * the END descriptor when there are no fragments.
         */
        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];
                        tp->tx_buffers[entry].skb = NULL;

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* Stop early so a maximally fragmented skb always fits;
                 * re-wake immediately if reclaim has already freed room.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4807
4808 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4809
4810 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4811  * TSO header is greater than 80 bytes.
4812  */
4813 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4814 {
4815         struct sk_buff *segs, *nskb;
4816
4817         /* Estimate the number of fragments in the worst case */
4818         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4819                 netif_stop_queue(tp->dev);
4820                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4821                         return NETDEV_TX_BUSY;
4822
4823                 netif_wake_queue(tp->dev);
4824         }
4825
4826         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4827         if (IS_ERR(segs))
4828                 goto tg3_tso_bug_end;
4829
4830         do {
4831                 nskb = segs;
4832                 segs = segs->next;
4833                 nskb->next = NULL;
4834                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4835         } while (segs);
4836
4837 tg3_tso_bug_end:
4838         dev_kfree_skb(skb);
4839
4840         return NETDEV_TX_OK;
4841 }
4842
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but additionally checks every DMA mapping
 * against the 4G/40-bit boundary errata and, when a mapping would hit
 * them, reroutes the packet through tigon3_dma_hwbug_workaround().
 *
 * Returns NETDEV_TX_OK (skb consumed) or NETDEV_TX_BUSY on a full ring.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are rewritten below; make sure we own them. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Affected chips mis-handle TSO headers longer than 80
                 * bytes; fall back to software GSO segmentation there.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the TCP checksum itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants a pseudo-header checksum seed. */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* IP/TCP option lengths are encoded differently per
                 * chip family: in the mss field for HW TSO and 5705,
                 * in base_flags otherwise.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        tp->tx_buffers[entry].skb = skb;

        would_hit_hwbug = 0;

        if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
        else if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];

                        tp->tx_buffers[entry].skb = NULL;

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        /* Firmware TSO only takes the mss on the first
                         * descriptor; HW TSO wants it on every one.
                         */
                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor this skb used. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
5020
5021 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5022                                int new_mtu)
5023 {
5024         dev->mtu = new_mtu;
5025
5026         if (new_mtu > ETH_DATA_LEN) {
5027                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5028                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5029                         ethtool_op_set_tso(dev, 0);
5030                 }
5031                 else
5032                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5033         } else {
5034                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5035                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5036                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5037         }
5038 }
5039
/* ndo change_mtu handler: validate @new_mtu and apply it.
 *
 * If the interface is down, only the flags/mtu are recorded; the new
 * settings take effect when the device is brought up.  If it is
 * running, the chip is halted and restarted so the ring configuration
 * (e.g. jumbo ring enable) matches the new MTU.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}
5078
5079 /* Free up pending packets in all rx/tx rings.
5080  *
5081  * The chip has been shut down and the driver detached from
5082  * the networking, so no interrupts or new tx packets will
5083  * end up in the driver.  tp->{tx,}lock is not held and we are not
5084  * in an interrupt context and thus may sleep.
5085  */
5086 static void tg3_free_rings(struct tg3 *tp)
5087 {
5088         struct ring_info *rxp;
5089         int i;
5090
5091         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5092                 rxp = &tp->rx_std_buffers[i];
5093
5094                 if (rxp->skb == NULL)
5095                         continue;
5096                 pci_unmap_single(tp->pdev,
5097                                  pci_unmap_addr(rxp, mapping),
5098                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5099                                  PCI_DMA_FROMDEVICE);
5100                 dev_kfree_skb_any(rxp->skb);
5101                 rxp->skb = NULL;
5102         }
5103
5104         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5105                 rxp = &tp->rx_jumbo_buffers[i];
5106
5107                 if (rxp->skb == NULL)
5108                         continue;
5109                 pci_unmap_single(tp->pdev,
5110                                  pci_unmap_addr(rxp, mapping),
5111                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5112                                  PCI_DMA_FROMDEVICE);
5113                 dev_kfree_skb_any(rxp->skb);
5114                 rxp->skb = NULL;
5115         }
5116
5117         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5118                 struct tx_ring_info *txp;
5119                 struct sk_buff *skb;
5120
5121                 txp = &tp->tx_buffers[i];
5122                 skb = txp->skb;
5123
5124                 if (skb == NULL) {
5125                         i++;
5126                         continue;
5127                 }
5128
5129                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5130
5131                 txp->skb = NULL;
5132
5133                 i += skb_shinfo(skb)->nr_frags + 1;
5134
5135                 dev_kfree_skb_any(skb);
5136         }
5137 }
5138
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even the first RX buffer of
 * a ring could be allocated; partial allocations shrink the
 * corresponding *_pending count instead of failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class parts have no jumbo ring and use oversized
         * standard-ring buffers for jumbo MTUs instead.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        /* Run with however many buffers we got. */
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
5228
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Frees everything tg3_alloc_consistent() set up: the kmalloc'd
 * sw ring_info arrays and each DMA-coherent ring/status/stats area.
 * Safe to call on a partially allocated state (each pointer is
 * checked and NULLed), which is why the alloc path can use it for
 * error cleanup.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers live inside this single
         * allocation, so only rx_std_buffers is freed.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
5268
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the sw ring bookkeeping arrays (one kzalloc carved into
 * three regions) and the DMA-coherent RX/TX rings, status block and
 * statistics block.  On any failure everything already allocated is
 * released via tg3_free_consistent().
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* Single allocation backing the std-RX, jumbo-RX and TX
         * ring_info arrays; only rx_std_buffers is ever kfree'd.
         */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* The status and stats blocks are read by the chip; start clean. */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
5330
5331 #define MAX_WAIT_CNT 1000
5332
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs: register offset of the block's mode register
 * @enable_bit: the enable bit to clear and poll
 * @silent: suppress the timeout error message
 *
 * Returns 0 on success (or for blocks that cannot be individually
 * disabled on 5705+), -ENODEV if the bit never cleared within
 * MAX_WAIT_CNT * 100us.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
        unsigned int i;
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
                case MBFREE_MODE:
                case BUFMGR_MODE:
                case MEMARB_MODE:
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */
                        return 0;

                default:
                        break;
                }
        }

        /* Clear the enable bit, flushing the write with a read. */
        val = tr32(ofs);
        val &= ~enable_bit;
        tw32_f(ofs, val);

        /* Poll up to 100ms for the block to acknowledge the stop. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
                        break;
        }

        if (i == MAX_WAIT_CNT && !silent) {
                printk(KERN_ERR PFX "tg3_stop_block timed out, "
                       "ofs=%lx enable_bit=%x\n",
                       ofs, enable_bit);
                return -ENODEV;
        }

        return 0;
}
5378
/* tp->lock is held.
 *
 * Stop all chip DMA/MAC engines in dependency order (receive path
 * first, then transmit, then host coalescing and memory blocks) and
 * clear the shared status/stats blocks.  Individual stop failures are
 * OR'd together; with @silent they are not logged.
 *
 * Returns 0 if every block stopped, otherwise a negative errno from
 * the failed stop(s).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Shut off the RX MAC before stopping the receive blocks. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the TX MAC and poll up to 100ms for it to stop. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset bit. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
5441
5442 /* tp->lock is held. */
5443 static int tg3_nvram_lock(struct tg3 *tp)
5444 {
5445         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5446                 int i;
5447
5448                 if (tp->nvram_lock_cnt == 0) {
5449                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5450                         for (i = 0; i < 8000; i++) {
5451                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5452                                         break;
5453                                 udelay(20);
5454                         }
5455                         if (i == 8000) {
5456                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5457                                 return -ENODEV;
5458                         }
5459                 }
5460                 tp->nvram_lock_cnt++;
5461         }
5462         return 0;
5463 }
5464
5465 /* tp->lock is held. */
5466 static void tg3_nvram_unlock(struct tg3 *tp)
5467 {
5468         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5469                 if (tp->nvram_lock_cnt > 0)
5470                         tp->nvram_lock_cnt--;
5471                 if (tp->nvram_lock_cnt == 0)
5472                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5473         }
5474 }
5475
5476 /* tp->lock is held. */
5477 static void tg3_enable_nvram_access(struct tg3 *tp)
5478 {
5479         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5480             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5481                 u32 nvaccess = tr32(NVRAM_ACCESS);
5482
5483                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5484         }
5485 }
5486
5487 /* tp->lock is held. */
5488 static void tg3_disable_nvram_access(struct tg3 *tp)
5489 {
5490         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5491             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5492                 u32 nvaccess = tr32(NVRAM_ACCESS);
5493
5494                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5495         }
5496 }
5497
/* Post an event to the APE management firmware.
 *
 * Bails out silently if the APE shared-memory signature is absent or
 * the APE firmware does not report ready.  The event is written under
 * TG3_APE_LOCK_MEM, and only when no previous event is still pending;
 * the doorbell (TG3_APE_EVENT) is rung only if the slot was free.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot free: publish our event while we hold the lock. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		/* apedata still holds the pre-write status: if the slot was
		 * free we have now queued our event and can stop retrying.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if we actually queued the event above. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5533
5534 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5535 {
5536         u32 event;
5537         u32 apedata;
5538
5539         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5540                 return;
5541
5542         switch (kind) {
5543                 case RESET_KIND_INIT:
5544                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5545                                         APE_HOST_SEG_SIG_MAGIC);
5546                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5547                                         APE_HOST_SEG_LEN_MAGIC);
5548                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5549                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5550                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5551                                         APE_HOST_DRIVER_ID_MAGIC);
5552                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5553                                         APE_HOST_BEHAV_NO_PHYLOCK);
5554
5555                         event = APE_EVENT_STATUS_STATE_START;
5556                         break;
5557                 case RESET_KIND_SHUTDOWN:
5558                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5559                         break;
5560                 case RESET_KIND_SUSPEND:
5561                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5562                         break;
5563                 default:
5564                         return;
5565         }
5566
5567         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5568
5569         tg3_ape_send_event(tp, event);
5570 }
5571
5572 /* tp->lock is held. */
5573 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5574 {
5575         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5576                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5577
5578         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5579                 switch (kind) {
5580                 case RESET_KIND_INIT:
5581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5582                                       DRV_STATE_START);
5583                         break;
5584
5585                 case RESET_KIND_SHUTDOWN:
5586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5587                                       DRV_STATE_UNLOAD);
5588                         break;
5589
5590                 case RESET_KIND_SUSPEND:
5591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5592                                       DRV_STATE_SUSPEND);
5593                         break;
5594
5595                 default:
5596                         break;
5597                 }
5598         }
5599
5600         if (kind == RESET_KIND_INIT ||
5601             kind == RESET_KIND_SUSPEND)
5602                 tg3_ape_driver_state_change(tp, kind);
5603 }
5604
5605 /* tp->lock is held. */
5606 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5607 {
5608         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5609                 switch (kind) {
5610                 case RESET_KIND_INIT:
5611                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5612                                       DRV_STATE_START_DONE);
5613                         break;
5614
5615                 case RESET_KIND_SHUTDOWN:
5616                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5617                                       DRV_STATE_UNLOAD_DONE);
5618                         break;
5619
5620                 default:
5621                         break;
5622                 }
5623         }
5624
5625         if (kind == RESET_KIND_SHUTDOWN)
5626                 tg3_ape_driver_state_change(tp, kind);
5627 }
5628
5629 /* tp->lock is held. */
5630 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5631 {
5632         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5633                 switch (kind) {
5634                 case RESET_KIND_INIT:
5635                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5636                                       DRV_STATE_START);
5637                         break;
5638
5639                 case RESET_KIND_SHUTDOWN:
5640                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5641                                       DRV_STATE_UNLOAD);
5642                         break;
5643
5644                 case RESET_KIND_SUSPEND:
5645                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5646                                       DRV_STATE_SUSPEND);
5647                         break;
5648
5649                 default:
5650                         break;
5651                 }
5652         }
5653 }
5654
5655 static int tg3_poll_fw(struct tg3 *tp)
5656 {
5657         int i;
5658         u32 val;
5659
5660         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5661                 /* Wait up to 20ms for init done. */
5662                 for (i = 0; i < 200; i++) {
5663                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5664                                 return 0;
5665                         udelay(100);
5666                 }
5667                 return -ENODEV;
5668         }
5669
5670         /* Wait for firmware initialization to complete. */
5671         for (i = 0; i < 100000; i++) {
5672                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5673                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5674                         break;
5675                 udelay(10);
5676         }
5677
5678         /* Chip might not be fitted with firmware.  Some Sun onboard
5679          * parts are configured like that.  So don't signal the timeout
5680          * of the above loop as an error, but do report the lack of
5681          * running firmware once.
5682          */
5683         if (i >= 100000 &&
5684             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5685                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5686
5687                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5688                        tp->dev->name);
5689         }
5690
5691         return 0;
5692 }
5693
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* GRC core-clock reset can clear the memory-enable bit in
	 * PCI_COMMAND on some chips; stash the register here so
	 * tg3_restore_pci_state() can put it back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5699
/* Restore PCI state after chip reset.
 *
 * Rebuilds the PCI configuration the reset clobbered: indirect access
 * enable, PCISTATE policy bits, the saved PCI_COMMAND word, cacheline
 * size/latency timer (or PCIe read request size), PCI-X relaxed
 * ordering, and the MSI enable bit on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5761
5762 static void tg3_stop_fw(struct tg3 *);
5763
/* tp->lock is held.
 *
 * Perform a GRC core-clock chip reset and bring the device back to a
 * usable state: PCI config space is saved/restored around the reset,
 * the MAC port mode is re-established for the PHY type, MDIO is
 * restarted, and the ASF enable state is re-probed from NVRAM config.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe-related magic; their semantics are not derivable
		 * from this file.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	/* This write triggers the actual core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Config space is back; the irq handler may touch registers again. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-establish the MAC port mode appropriate for the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	/* Wait for on-chip firmware (if fitted) to come back up. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 is another undocumented
		 * PCIe tweak; purpose not derivable from this file.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5967
5968 /* tp->lock is held. */
5969 static void tg3_stop_fw(struct tg3 *tp)
5970 {
5971         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5972            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5973                 /* Wait for RX cpu to ACK the previous event. */
5974                 tg3_wait_for_event_ack(tp);
5975
5976                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5977
5978                 tg3_generate_fw_event(tp);
5979
5980                 /* Wait for RX cpu to ACK this event. */
5981                 tg3_wait_for_event_ack(tp);
5982         }
5983 }
5984
/* tp->lock is held.
 *
 * Full shutdown path: quiesce firmware, signal the reset to
 * management agents, stop the hardware blocks, then reset the chip.
 * Returns the result of tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Post-reset signatures are written even if the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
6005
/* Layout constants for the driver-embedded bootcode image loaded by
 * the firmware-load path (see tg3FwText/tg3FwRodata below): section
 * load addresses and byte lengths in CPU address space.
 * NOTE(review): "RELASE" below is a historical typo; the macro is kept
 * spelled as-is in case anything still references it by that name.
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
6020
/* Embedded bootcode .text image (raw instruction words; see the
 * copyright/permission notice at the top of this file).  Do not edit:
 * the words are loaded verbatim into CPU scratch memory at
 * TG3_FW_TEXT_ADDR.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6114
/* Embedded bootcode .rodata image, loaded verbatim at
 * TG3_FW_RODATA_ADDR.  Do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
6122
6123 #if 0 /* All zeros, don't eat up space with it. */
6124 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6125         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6126         0x00000000, 0x00000000, 0x00000000, 0x00000000
6127 };
6128 #endif
6129
/* On-chip scratch memory windows used as load targets for the RX/TX
 * embedded CPU firmware (16KB each).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
6134
/* tp->lock is held.
 *
 * Halt the embedded CPU whose register block starts at @offset
 * (RX_CPU_BASE or TX_CPU_BASE).  Returns 0 on success or -ENODEV if
 * the CPU does not report HALT within 10000 write/poll iterations.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705-and-newer chips have no separate TX CPU, so asking to halt
	 * it would be a driver bug.
	 */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 is halted through GRC_VCPU_EXT_CTRL instead of the
		 * per-CPU STATE/MODE registers; no polling is done here.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* The RX CPU additionally gets one final flushed halt write
		 * plus a short delay after the poll loop.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* Loop ran to completion without seeing CPU_MODE_HALT. */
	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
6182
/* Describes one firmware image to load into CPU scratch memory: the
 * .text, .rodata and .data sections, each with its link address, byte
 * length, and image words.  A NULL data pointer means the section is
 * all zeros and the loader fills it with zeros.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* length of .text in bytes */
	const u32 *text_data;		/* .text image words */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* length of .rodata in bytes */
	const u32 *rodata_data;		/* .rodata image words */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* length of .data in bytes */
	const u32 *data_data;		/* .data image words; NULL => zeros */
};
6194
6195 /* tp->lock is held. */
6196 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6197                                  int cpu_scratch_size, struct fw_info *info)
6198 {
6199         int err, lock_err, i;
6200         void (*write_op)(struct tg3 *, u32, u32);
6201
6202         if (cpu_base == TX_CPU_BASE &&
6203             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6204                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6205                        "TX cpu firmware on %s which is 5705.\n",
6206                        tp->dev->name);
6207                 return -EINVAL;
6208         }
6209
6210         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6211                 write_op = tg3_write_mem;
6212         else
6213                 write_op = tg3_write_indirect_reg32;
6214
6215         /* It is possible that bootcode is still loading at this point.
6216          * Get the nvram lock first before halting the cpu.
6217          */
6218         lock_err = tg3_nvram_lock(tp);
6219         err = tg3_halt_cpu(tp, cpu_base);
6220         if (!lock_err)
6221                 tg3_nvram_unlock(tp);
6222         if (err)
6223                 goto out;
6224
6225         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6226                 write_op(tp, cpu_scratch_base + i, 0);
6227         tw32(cpu_base + CPU_STATE, 0xffffffff);
6228         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6229         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6230                 write_op(tp, (cpu_scratch_base +
6231                               (info->text_base & 0xffff) +
6232                               (i * sizeof(u32))),
6233                          (info->text_data ?
6234                           info->text_data[i] : 0));
6235         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6236                 write_op(tp, (cpu_scratch_base +
6237                               (info->rodata_base & 0xffff) +
6238                               (i * sizeof(u32))),
6239                          (info->rodata_data ?
6240                           info->rodata_data[i] : 0));
6241         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6242                 write_op(tp, (cpu_scratch_base +
6243                               (info->data_base & 0xffff) +
6244                               (i * sizeof(u32))),
6245                          (info->data_data ?
6246                           info->data_data[i] : 0));
6247
6248         err = 0;
6249
6250 out:
6251         return err;
6252 }
6253
6254 /* tp->lock is held. */
6255 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6256 {
6257         struct fw_info info;
6258         int err, i;
6259
6260         info.text_base = TG3_FW_TEXT_ADDR;
6261         info.text_len = TG3_FW_TEXT_LEN;
6262         info.text_data = &tg3FwText[0];
6263         info.rodata_base = TG3_FW_RODATA_ADDR;
6264         info.rodata_len = TG3_FW_RODATA_LEN;
6265         info.rodata_data = &tg3FwRodata[0];
6266         info.data_base = TG3_FW_DATA_ADDR;
6267         info.data_len = TG3_FW_DATA_LEN;
6268         info.data_data = NULL;
6269
6270         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6271                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6272                                     &info);
6273         if (err)
6274                 return err;
6275
6276         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6277                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6278                                     &info);
6279         if (err)
6280                 return err;
6281
6282         /* Now startup only the RX cpu. */
6283         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6284         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6285
6286         for (i = 0; i < 5; i++) {
6287                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6288                         break;
6289                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6290                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6291                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6292                 udelay(1000);
6293         }
6294         if (i >= 5) {
6295                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6296                        "to set RX CPU PC, is %08x should be %08x\n",
6297                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6298                        TG3_FW_TEXT_ADDR);
6299                 return -ENODEV;
6300         }
6301         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6302         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6303
6304         return 0;
6305 }
6306
6307
/* Layout of the TSO firmware image (release 1.6.0): load addresses and
 * byte lengths of its .text/.rodata/.data/.sbss/.bss sections.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a typo; the
 * macro name is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
6322
/* TSO firmware .text image: raw instruction words, opaque to the
 * driver.  Size and load address are given by the TG3_TSO_FW_* macros
 * above; do not edit the data.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
6609
/* TSO firmware .rodata image; the words appear to encode ASCII strings
 * used by the firmware — TODO confirm against the firmware build.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6617
/* TSO firmware .data image; appears to contain an ASCII version tag —
 * TODO confirm against the firmware build.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6623
/* 5705 needs a special version of the TSO firmware.  */
/* Layout of the 5705 TSO firmware image (release 1.2.0): load
 * addresses and byte lengths of its .text/.rodata/.data/.sbss/.bss
 * sections.
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is a typo; the
 * macro name is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6639
/* Text (instruction) segment of the 5705-specific TSO firmware;
 * downloaded to TG3_TSO5_FW_TEXT_ADDR and run on the RX CPU by
 * tg3_load_tso_firmware().  Raw opcode words derived from proprietary
 * Broadcom source (see copyright header) -- do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6798
/* Read-only data segment of the 5705 TSO firmware; installed at
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  Packed ASCII
 * diagnostic strings ("Main", "CpuB", "stkoffld", "fatalErr") -- do
 * not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6805
/* Initialized data segment of the 5705 TSO firmware; installed at
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  The words spell
 * the packed ASCII version tag "stkoffld_v1.2.0" -- do not edit by
 * hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6810
6811 /* tp->lock is held. */
6812 static int tg3_load_tso_firmware(struct tg3 *tp)
6813 {
6814         struct fw_info info;
6815         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6816         int err, i;
6817
6818         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6819                 return 0;
6820
6821         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6822                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6823                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6824                 info.text_data = &tg3Tso5FwText[0];
6825                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6826                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6827                 info.rodata_data = &tg3Tso5FwRodata[0];
6828                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6829                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6830                 info.data_data = &tg3Tso5FwData[0];
6831                 cpu_base = RX_CPU_BASE;
6832                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6833                 cpu_scratch_size = (info.text_len +
6834                                     info.rodata_len +
6835                                     info.data_len +
6836                                     TG3_TSO5_FW_SBSS_LEN +
6837                                     TG3_TSO5_FW_BSS_LEN);
6838         } else {
6839                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6840                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6841                 info.text_data = &tg3TsoFwText[0];
6842                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6843                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6844                 info.rodata_data = &tg3TsoFwRodata[0];
6845                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6846                 info.data_len = TG3_TSO_FW_DATA_LEN;
6847                 info.data_data = &tg3TsoFwData[0];
6848                 cpu_base = TX_CPU_BASE;
6849                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6850                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6851         }
6852
6853         err = tg3_load_firmware_cpu(tp, cpu_base,
6854                                     cpu_scratch_base, cpu_scratch_size,
6855                                     &info);
6856         if (err)
6857                 return err;
6858
6859         /* Now startup the cpu. */
6860         tw32(cpu_base + CPU_STATE, 0xffffffff);
6861         tw32_f(cpu_base + CPU_PC,    info.text_base);
6862
6863         for (i = 0; i < 5; i++) {
6864                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6865                         break;
6866                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6867                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6868                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6869                 udelay(1000);
6870         }
6871         if (i >= 5) {
6872                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6873                        "to set CPU PC, is %08x should be %08x\n",
6874                        tp->dev->name, tr32(cpu_base + CPU_PC),
6875                        info.text_base);
6876                 return -ENODEV;
6877         }
6878         tw32(cpu_base + CPU_STATE, 0xffffffff);
6879         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6880         return 0;
6881 }
6882
6883
6884 /* tp->lock is held. */
6885 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6886 {
6887         u32 addr_high, addr_low;
6888         int i;
6889
6890         addr_high = ((tp->dev->dev_addr[0] << 8) |
6891                      tp->dev->dev_addr[1]);
6892         addr_low = ((tp->dev->dev_addr[2] << 24) |
6893                     (tp->dev->dev_addr[3] << 16) |
6894                     (tp->dev->dev_addr[4] <<  8) |
6895                     (tp->dev->dev_addr[5] <<  0));
6896         for (i = 0; i < 4; i++) {
6897                 if (i == 1 && skip_mac_1)
6898                         continue;
6899                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6900                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6901         }
6902
6903         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6904             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6905                 for (i = 0; i < 12; i++) {
6906                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6907                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6908                 }
6909         }
6910
6911         addr_high = (tp->dev->dev_addr[0] +
6912                      tp->dev->dev_addr[1] +
6913                      tp->dev->dev_addr[2] +
6914                      tp->dev->dev_addr[3] +
6915                      tp->dev->dev_addr[4] +
6916                      tp->dev->dev_addr[5]) &
6917                 TX_BACKOFF_SEED_MASK;
6918         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6919 }
6920
6921 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6922 {
6923         struct tg3 *tp = netdev_priv(dev);
6924         struct sockaddr *addr = p;
6925         int err = 0, skip_mac_1 = 0;
6926
6927         if (!is_valid_ether_addr(addr->sa_data))
6928                 return -EINVAL;
6929
6930         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6931
6932         if (!netif_running(dev))
6933                 return 0;
6934
6935         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6936                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6937
6938                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6939                 addr0_low = tr32(MAC_ADDR_0_LOW);
6940                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6941                 addr1_low = tr32(MAC_ADDR_1_LOW);
6942
6943                 /* Skip MAC addr 1 if ASF is using it. */
6944                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6945                     !(addr1_high == 0 && addr1_low == 0))
6946                         skip_mac_1 = 1;
6947         }
6948         spin_lock_bh(&tp->lock);
6949         __tg3_set_mac_addr(tp, skip_mac_1);
6950         spin_unlock_bh(&tp->lock);
6951
6952         return err;
6953 }
6954
6955 /* tp->lock is held. */
6956 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6957                            dma_addr_t mapping, u32 maxlen_flags,
6958                            u32 nic_addr)
6959 {
6960         tg3_write_mem(tp,
6961                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6962                       ((u64) mapping >> 32));
6963         tg3_write_mem(tp,
6964                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6965                       ((u64) mapping & 0xffffffff));
6966         tg3_write_mem(tp,
6967                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6968                        maxlen_flags);
6969
6970         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6971                 tg3_write_mem(tp,
6972                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6973                               nic_addr);
6974 }
6975
6976 static void __tg3_set_rx_mode(struct net_device *);
6977 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6978 {
6979         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6980         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6981         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6982         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6983         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6984                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6985                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6986         }
6987         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6988         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6989         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6990                 u32 val = ec->stats_block_coalesce_usecs;
6991
6992                 if (!netif_carrier_ok(tp->dev))
6993                         val = 0;
6994
6995                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6996         }
6997 }
6998
6999 /* tp->lock is held. */
7000 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7001 {
7002         u32 val, rdmac_mode;
7003         int i, err, limit;
7004
7005         tg3_disable_ints(tp);
7006
7007         tg3_stop_fw(tp);
7008
7009         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7010
7011         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7012                 tg3_abort_hw(tp, 1);
7013         }
7014
7015         if (reset_phy &&
7016             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7017                 tg3_phy_reset(tp);
7018
7019         err = tg3_chip_reset(tp);
7020         if (err)
7021                 return err;
7022
7023         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7024
7025         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7026             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7027                 val = tr32(TG3_CPMU_CTRL);
7028                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7029                 tw32(TG3_CPMU_CTRL, val);
7030
7031                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7032                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7033                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7034                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7035
7036                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7037                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7038                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7039                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7040
7041                 val = tr32(TG3_CPMU_HST_ACC);
7042                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7043                 val |= CPMU_HST_ACC_MACCLK_6_25;
7044                 tw32(TG3_CPMU_HST_ACC, val);
7045         }
7046
7047         /* This works around an issue with Athlon chipsets on
7048          * B3 tigon3 silicon.  This bit has no effect on any
7049          * other revision.  But do not set this on PCI Express
7050          * chips and don't even touch the clocks if the CPMU is present.
7051          */
7052         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7053                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7054                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7055                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7056         }
7057
7058         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7059             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7060                 val = tr32(TG3PCI_PCISTATE);
7061                 val |= PCISTATE_RETRY_SAME_DMA;
7062                 tw32(TG3PCI_PCISTATE, val);
7063         }
7064
7065         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7066                 /* Allow reads and writes to the
7067                  * APE register and memory space.
7068                  */
7069                 val = tr32(TG3PCI_PCISTATE);
7070                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7071                        PCISTATE_ALLOW_APE_SHMEM_WR;
7072                 tw32(TG3PCI_PCISTATE, val);
7073         }
7074
7075         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7076                 /* Enable some hw fixes.  */
7077                 val = tr32(TG3PCI_MSI_DATA);
7078                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7079                 tw32(TG3PCI_MSI_DATA, val);
7080         }
7081
7082         /* Descriptor ring init may make accesses to the
7083          * NIC SRAM area to setup the TX descriptors, so we
7084          * can only do this after the hardware has been
7085          * successfully reset.
7086          */
7087         err = tg3_init_rings(tp);
7088         if (err)
7089                 return err;
7090
7091         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7092             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7093             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7094                 /* This value is determined during the probe time DMA
7095                  * engine test, tg3_test_dma.
7096                  */
7097                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7098         }
7099
7100         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7101                           GRC_MODE_4X_NIC_SEND_RINGS |
7102                           GRC_MODE_NO_TX_PHDR_CSUM |
7103                           GRC_MODE_NO_RX_PHDR_CSUM);
7104         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7105
7106         /* Pseudo-header checksum is done by hardware logic and not
7107          * the offload processers, so make the chip do the pseudo-
7108          * header checksums on receive.  For transmit it is more
7109          * convenient to do the pseudo-header checksum in software
7110          * as Linux does that on transmit for us in all cases.
7111          */
7112         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7113
7114         tw32(GRC_MODE,
7115              tp->grc_mode |
7116              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7117
7118         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7119         val = tr32(GRC_MISC_CFG);
7120         val &= ~0xff;
7121         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7122         tw32(GRC_MISC_CFG, val);
7123
7124         /* Initialize MBUF/DESC pool. */
7125         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7126                 /* Do nothing.  */
7127         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7128                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7129                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7130                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7131                 else
7132                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7133                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7134                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7135         }
7136         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7137                 int fw_len;
7138
7139                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7140                           TG3_TSO5_FW_RODATA_LEN +
7141                           TG3_TSO5_FW_DATA_LEN +
7142                           TG3_TSO5_FW_SBSS_LEN +
7143                           TG3_TSO5_FW_BSS_LEN);
7144                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7145                 tw32(BUFMGR_MB_POOL_ADDR,
7146                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7147                 tw32(BUFMGR_MB_POOL_SIZE,
7148                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7149         }
7150
7151         if (tp->dev->mtu <= ETH_DATA_LEN) {
7152                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7153                      tp->bufmgr_config.mbuf_read_dma_low_water);
7154                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7155                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7156                 tw32(BUFMGR_MB_HIGH_WATER,
7157                      tp->bufmgr_config.mbuf_high_water);
7158         } else {
7159                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7160                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7161                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7162                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7163                 tw32(BUFMGR_MB_HIGH_WATER,
7164                      tp->bufmgr_config.mbuf_high_water_jumbo);
7165         }
7166         tw32(BUFMGR_DMA_LOW_WATER,
7167              tp->bufmgr_config.dma_low_water);
7168         tw32(BUFMGR_DMA_HIGH_WATER,
7169              tp->bufmgr_config.dma_high_water);
7170
7171         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7172         for (i = 0; i < 2000; i++) {
7173                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7174                         break;
7175                 udelay(10);
7176         }
7177         if (i >= 2000) {
7178                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7179                        tp->dev->name);
7180                 return -ENODEV;
7181         }
7182
7183         /* Setup replenish threshold. */
7184         val = tp->rx_pending / 8;
7185         if (val == 0)
7186                 val = 1;
7187         else if (val > tp->rx_std_max_post)
7188                 val = tp->rx_std_max_post;
7189         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7190                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7191                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7192
7193                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7194                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7195         }
7196
7197         tw32(RCVBDI_STD_THRESH, val);
7198
7199         /* Initialize TG3_BDINFO's at:
7200          *  RCVDBDI_STD_BD:     standard eth size rx ring
7201          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7202          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7203          *
7204          * like so:
7205          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7206          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7207          *                              ring attribute flags
7208          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7209          *
7210          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7211          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7212          *
7213          * The size of each ring is fixed in the firmware, but the location is
7214          * configurable.
7215          */
7216         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7217              ((u64) tp->rx_std_mapping >> 32));
7218         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7219              ((u64) tp->rx_std_mapping & 0xffffffff));
7220         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7221              NIC_SRAM_RX_BUFFER_DESC);
7222
7223         /* Don't even try to program the JUMBO/MINI buffer descriptor
7224          * configs on 5705.
7225          */
7226         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7227                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7228                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7229         } else {
7230                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7231                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7232
7233                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7234                      BDINFO_FLAGS_DISABLED);
7235
7236                 /* Setup replenish threshold. */
7237                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7238
7239                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7240                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7241                              ((u64) tp->rx_jumbo_mapping >> 32));
7242                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7243                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7244                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7245                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7246                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7247                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7248                 } else {
7249                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7250                              BDINFO_FLAGS_DISABLED);
7251                 }
7252
7253         }
7254
7255         /* There is only one send ring on 5705/5750, no need to explicitly
7256          * disable the others.
7257          */
7258         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7259                 /* Clear out send RCB ring in SRAM. */
7260                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7261                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7262                                       BDINFO_FLAGS_DISABLED);
7263         }
7264
7265         tp->tx_prod = 0;
7266         tp->tx_cons = 0;
7267         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7268         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7269
7270         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7271                        tp->tx_desc_mapping,
7272                        (TG3_TX_RING_SIZE <<
7273                         BDINFO_FLAGS_MAXLEN_SHIFT),
7274                        NIC_SRAM_TX_BUFFER_DESC);
7275
7276         /* There is only one receive return ring on 5705/5750, no need
7277          * to explicitly disable the others.
7278          */
7279         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7280                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7281                      i += TG3_BDINFO_SIZE) {
7282                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7283                                       BDINFO_FLAGS_DISABLED);
7284                 }
7285         }
7286
7287         tp->rx_rcb_ptr = 0;
7288         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7289
7290         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7291                        tp->rx_rcb_mapping,
7292                        (TG3_RX_RCB_RING_SIZE(tp) <<
7293                         BDINFO_FLAGS_MAXLEN_SHIFT),
7294                        0);
7295
7296         tp->rx_std_ptr = tp->rx_pending;
7297         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7298                      tp->rx_std_ptr);
7299
7300         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7301                                                 tp->rx_jumbo_pending : 0;
7302         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7303                      tp->rx_jumbo_ptr);
7304
7305         /* Initialize MAC address and backoff seed. */
7306         __tg3_set_mac_addr(tp, 0);
7307
7308         /* MTU + ethernet header + FCS + optional VLAN tag */
7309         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7310
7311         /* The slot time is changed by tg3_setup_phy if we
7312          * run at gigabit with half duplex.
7313          */
7314         tw32(MAC_TX_LENGTHS,
7315              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7316              (6 << TX_LENGTHS_IPG_SHIFT) |
7317              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7318
7319         /* Receive rules. */
7320         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7321         tw32(RCVLPC_CONFIG, 0x0181);
7322
7323         /* Calculate RDMAC_MODE setting early, we need it to determine
7324          * the RCVLPC_STATE_ENABLE mask.
7325          */
7326         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7327                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7328                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7329                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7330                       RDMAC_MODE_LNGREAD_ENAB);
7331
7332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7333             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7334                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7335                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7336                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7337
7338         /* If statement applies to 5705 and 5750 PCI devices only */
7339         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7340              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7341             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7342                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7343                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7344                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7345                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7346                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7347                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7348                 }
7349         }
7350
7351         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7352                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7353
7354         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7355                 rdmac_mode |= (1 << 27);
7356
7357         /* Receive/send statistics. */
7358         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7359                 val = tr32(RCVLPC_STATS_ENABLE);
7360                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7361                 tw32(RCVLPC_STATS_ENABLE, val);
7362         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7363                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7364                 val = tr32(RCVLPC_STATS_ENABLE);
7365                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7366                 tw32(RCVLPC_STATS_ENABLE, val);
7367         } else {
7368                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7369         }
7370         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7371         tw32(SNDDATAI_STATSENAB, 0xffffff);
7372         tw32(SNDDATAI_STATSCTRL,
7373              (SNDDATAI_SCTRL_ENABLE |
7374               SNDDATAI_SCTRL_FASTUPD));
7375
7376         /* Setup host coalescing engine. */
7377         tw32(HOSTCC_MODE, 0);
7378         for (i = 0; i < 2000; i++) {
7379                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7380                         break;
7381                 udelay(10);
7382         }
7383
7384         __tg3_set_coalesce(tp, &tp->coal);
7385
7386         /* set status block DMA address */
7387         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7388              ((u64) tp->status_mapping >> 32));
7389         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7390              ((u64) tp->status_mapping & 0xffffffff));
7391
7392         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7393                 /* Status/statistics block address.  See tg3_timer,
7394                  * the tg3_periodic_fetch_stats call there, and
7395                  * tg3_get_stats to see how this works for 5705/5750 chips.
7396                  */
7397                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7398                      ((u64) tp->stats_mapping >> 32));
7399                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7400                      ((u64) tp->stats_mapping & 0xffffffff));
7401                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7402                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7403         }
7404
7405         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7406
7407         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7408         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7409         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7410                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7411
7412         /* Clear statistics/status block in chip, and status block in ram. */
7413         for (i = NIC_SRAM_STATS_BLK;
7414              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7415              i += sizeof(u32)) {
7416                 tg3_write_mem(tp, i, 0);
7417                 udelay(40);
7418         }
7419         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7420
7421         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7422                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7423                 /* reset to prevent losing 1st rx packet intermittently */
7424                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7425                 udelay(10);
7426         }
7427
7428         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7429                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7430         else
7431                 tp->mac_mode = 0;
7432         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7433                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7434         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7435             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7436             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7437                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7438         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7439         udelay(40);
7440
7441         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7442          * If TG3_FLG2_IS_NIC is zero, we should read the
7443          * register to preserve the GPIO settings for LOMs. The GPIOs,
7444          * whether used as inputs or outputs, are set by boot code after
7445          * reset.
7446          */
7447         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7448                 u32 gpio_mask;
7449
7450                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7451                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7452                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7453
7454                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7455                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7456                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7457
7458                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7459                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7460
7461                 tp->grc_local_ctrl &= ~gpio_mask;
7462                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7463
7464                 /* GPIO1 must be driven high for eeprom write protect */
7465                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7466                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7467                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7468         }
7469         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7470         udelay(100);
7471
7472         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7473         tp->last_tag = 0;
7474
7475         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7476                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7477                 udelay(40);
7478         }
7479
7480         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7481                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7482                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7483                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7484                WDMAC_MODE_LNGREAD_ENAB);
7485
7486         /* If statement applies to 5705 and 5750 PCI devices only */
7487         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7488              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7489             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7490                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7491                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7492                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7493                         /* nothing */
7494                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7495                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7496                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7497                         val |= WDMAC_MODE_RX_ACCEL;
7498                 }
7499         }
7500
7501         /* Enable host coalescing bug fix */
7502         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7503             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7504             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7505             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7506             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7507                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7508
7509         tw32_f(WDMAC_MODE, val);
7510         udelay(40);
7511
7512         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7513                 u16 pcix_cmd;
7514
7515                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7516                                      &pcix_cmd);
7517                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7518                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7519                         pcix_cmd |= PCI_X_CMD_READ_2K;
7520                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7521                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7522                         pcix_cmd |= PCI_X_CMD_READ_2K;
7523                 }
7524                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7525                                       pcix_cmd);
7526         }
7527
7528         tw32_f(RDMAC_MODE, rdmac_mode);
7529         udelay(40);
7530
7531         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7532         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7533                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7534
7535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7536                 tw32(SNDDATAC_MODE,
7537                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7538         else
7539                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7540
7541         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7542         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7543         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7544         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7545         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7546                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7547         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7548         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7549
7550         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7551                 err = tg3_load_5701_a0_firmware_fix(tp);
7552                 if (err)
7553                         return err;
7554         }
7555
7556         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7557                 err = tg3_load_tso_firmware(tp);
7558                 if (err)
7559                         return err;
7560         }
7561
7562         tp->tx_mode = TX_MODE_ENABLE;
7563         tw32_f(MAC_TX_MODE, tp->tx_mode);
7564         udelay(100);
7565
7566         tp->rx_mode = RX_MODE_ENABLE;
7567         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7568             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7570             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7571                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7572
7573         tw32_f(MAC_RX_MODE, tp->rx_mode);
7574         udelay(10);
7575
7576         tw32(MAC_LED_CTRL, tp->led_ctrl);
7577
7578         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7579         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7580                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7581                 udelay(10);
7582         }
7583         tw32_f(MAC_RX_MODE, tp->rx_mode);
7584         udelay(10);
7585
7586         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7587                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7588                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7589                         /* Set drive transmission level to 1.2V  */
7590                         /* only if the signal pre-emphasis bit is not set  */
7591                         val = tr32(MAC_SERDES_CFG);
7592                         val &= 0xfffff000;
7593                         val |= 0x880;
7594                         tw32(MAC_SERDES_CFG, val);
7595                 }
7596                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7597                         tw32(MAC_SERDES_CFG, 0x616000);
7598         }
7599
7600         /* Prevent chip from dropping frames when flow control
7601          * is enabled.
7602          */
7603         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7604
7605         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7606             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7607                 /* Use hardware link auto-negotiation */
7608                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7609         }
7610
7611         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7612             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7613                 u32 tmp;
7614
7615                 tmp = tr32(SERDES_RX_CTRL);
7616                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7617                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7618                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7619                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7620         }
7621
7622         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7623                 if (tp->link_config.phy_is_low_power) {
7624                         tp->link_config.phy_is_low_power = 0;
7625                         tp->link_config.speed = tp->link_config.orig_speed;
7626                         tp->link_config.duplex = tp->link_config.orig_duplex;
7627                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7628                 }
7629
7630                 err = tg3_setup_phy(tp, 0);
7631                 if (err)
7632                         return err;
7633
7634                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7635                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7636                         u32 tmp;
7637
7638                         /* Clear CRC stats. */
7639                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7640                                 tg3_writephy(tp, MII_TG3_TEST1,
7641                                              tmp | MII_TG3_TEST1_CRC_EN);
7642                                 tg3_readphy(tp, 0x14, &tmp);
7643                         }
7644                 }
7645         }
7646
7647         __tg3_set_rx_mode(tp->dev);
7648
7649         /* Initialize receive rules. */
7650         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7651         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7652         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7653         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7654
7655         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7656             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7657                 limit = 8;
7658         else
7659                 limit = 16;
7660         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7661                 limit -= 4;
7662         switch (limit) {
7663         case 16:
7664                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7665         case 15:
7666                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7667         case 14:
7668                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7669         case 13:
7670                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7671         case 12:
7672                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7673         case 11:
7674                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7675         case 10:
7676                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7677         case 9:
7678                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7679         case 8:
7680                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7681         case 7:
7682                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7683         case 6:
7684                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7685         case 5:
7686                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7687         case 4:
7688                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7689         case 3:
7690                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7691         case 2:
7692         case 1:
7693
7694         default:
7695                 break;
7696         }
7697
7698         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7699                 /* Write our heartbeat update interval to APE. */
7700                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7701                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7702
7703         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7704
7705         return 0;
7706 }
7707
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or the negative error code propagated from
 * tg3_reset_hw() (e.g. -ENODEV if a hardware unit fails to enable).
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* NOTE(review): presumably selects/enables the proper core clocks
	 * before any register programming — confirm in tg3_switch_clocks().
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window to the start of NIC SRAM. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* The real work: full hardware reset and reprogramming. */
	return tg3_reset_hw(tp, reset_phy);
}
7719
/* Accumulate the 32-bit hardware statistics register REG into the 64-bit
 * { high, low } counter PSTAT: add into the low word and, when the
 * addition wraps (new low < value just added), carry one into high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7726
/* Fold the chip's 32-bit statistics counters into the 64-bit software
 * totals in tp->hw_stats.  Called once per second from tg3_timer() when
 * TG3_FLG2_5705_PLUS is set; see tg3_get_stats for how the totals are
 * consumed.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip the register reads while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement (RCVLPC) statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7767
/* Per-device periodic timer callback, rearmed every tp->timer_offset
 * jiffies.  Responsibilities: (1) work around the race-prone non-tagged
 * IRQ status protocol, (2) once per second, fetch statistics and poll
 * the link state, and (3) send the ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* NOTE(review): irq_sync appears to flag an in-progress interrupt
	 * synchronization elsewhere — skip all work this tick, but keep
	 * the timer armed.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly unserviced:
			 * force an interrupt via GRC local control.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Kick the host coalescing engine (HOSTCC_MODE_NOW)
			 * so any pending status update reaches the host.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly: schedule a
			 * chip reset from process context.  The timer is not
			 * rearmed here; TG3_FLG2_RESTART_TIMER presumably
			 * tells the reset path to restart it.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Link changes are detected by polling MAC_STATUS
			 * instead of via interrupt.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link reported up but the hardware saw a state
			 * change, or link down while the PCS/signal-detect
			 * bits say a partner is present: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode bits to
					 * reset the link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7887
7888 static int tg3_request_irq(struct tg3 *tp)
7889 {
7890         irq_handler_t fn;
7891         unsigned long flags;
7892         struct net_device *dev = tp->dev;
7893
7894         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7895                 fn = tg3_msi;
7896                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7897                         fn = tg3_msi_1shot;
7898                 flags = IRQF_SAMPLE_RANDOM;
7899         } else {
7900                 fn = tg3_interrupt;
7901                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7902                         fn = tg3_interrupt_tagged;
7903                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7904         }
7905         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7906 }
7907
/* Verify that the chip can actually deliver an interrupt to the host.
 *
 * Temporarily replaces the normal handler with the minimal tg3_test_isr,
 * forces the host coalescing engine to raise an interrupt, and polls for
 * evidence that it arrived.  The normal handler is reinstalled before
 * returning.  Returns 0 on success, -ENODEV if the device is down, or a
 * negative error (-EIO when no interrupt was observed).
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the test ISR for the duration of the check. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	/* Clear the updated bit so a fresh interrupt is detectable. */
	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Ask the coalescing engine to generate an interrupt NOW. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll up to ~50ms for the interrupt to show up either in the
	 * interrupt mailbox or as a masked PCI interrupt.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Reinstall the normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7961
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Reinstall the handler; tg3_request_irq() now picks INTx. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed there is nothing to service the IRQ. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
8022
/* net_device open hook: power the chip up, allocate descriptor rings,
 * install the interrupt handler (MSI when supported and verified),
 * initialize the hardware and start the periodic service timer.
 * Returns 0 on success or a negative errno, with all partially
 * acquired resources released on every failure path.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	/* Bring the device to full power before touching registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo the MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slow (1 Hz) service timer;
		 * otherwise run it at 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires at half the timer rate (every 2s). */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI really works on this board; tg3_test_msi()
		 * falls back to INTx itself when possible.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	/* Arm the service timer and open the interrupt floodgates. */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
8158
#if 0
/* Dump a broad snapshot of chip state (PCI config, MAC, DMA, ring and
 * mailbox registers, SRAM control blocks, and NIC-side descriptors) to
 * the kernel log.  Debug-only: compiled out by default; enable this
 * #if 0 block and the matching call site in tg3_close() to use it.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* SRAM-resident ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	/* NIC side jumbo RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8386
8387 static struct net_device_stats *tg3_get_stats(struct net_device *);
8388 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8389
/* net_device stop hook: quiesce NAPI, the reset task and the service
 * timer, halt the chip, release the IRQ/MSI and ring memory, then put
 * the device into D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure no reset task is still running during teardown. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the counters before the hw stats memory is freed
	 * below, so totals survive across close/open cycles.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8433
/* Read one 64-bit hardware statistics counter as an unsigned long.
 * On 32-bit hosts only the low word is used; on 64-bit hosts the full
 * value is reassembled from the high/low halves.
 */
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
#if (BITS_PER_LONG == 32)
	return (unsigned long) val->low;
#else
	return (((u64)val->high << 32) | ((u64)val->low));
#endif
}
8445
/* Reassemble a full 64-bit statistics counter from its 32-bit halves. */
static inline u64 get_estat64(tg3_stat64_t *val)
{
	u64 hi = val->high;
	u64 lo = val->low;

	return (hi << 32) | lo;
}
8450
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper chips the CRC error count lives in the PHY
 * rather than in the MAC statistics block, so it is read over MII and
 * accumulated in tp->phy_crc_errors; all other configurations use the
 * rx_fcs_errors hardware counter directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then fetch it from
			 * PHY register 0x14.
			 * NOTE(review): presumably this register is
			 * clear-on-read -- confirm against the PHY
			 * datasheet.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8476
/* Add the live hardware counter to the total accumulated before the
 * last counter reset, yielding a monotonically increasing value.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_estat64(&hw_stats->member)

/* Refresh tp->estats from the hardware statistics block.  When the
 * stats memory is not allocated (device down), returns the snapshot
 * saved in tg3_close() unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC statistics. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC statistics. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Ring and DMA diagnostics. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8568
/* Fill in tp->net_stats from the hardware statistics block, folding in
 * the totals saved before the last chip reset.  When the stats memory
 * is not allocated (device down), returns the saved snapshot unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The generic rx/tx packet counts aggregate the per-cast-type
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may live in the PHY on some chips; see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8628
/* Bitwise CRC-32 (reflected Ethernet polynomial 0xedb88320, initial
 * value 0xffffffff, final inversion) over len bytes of buf.  Used to
 * derive the multicast hash-filter bit for a MAC address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++) {
			/* Shift out the LSB and fold in the polynomial
			 * whenever the shifted-out bit was set.
			 */
			u32 lsb = crc & 0x01;

			crc >>= 1;
			if (lsb)
				crc ^= 0xedb88320;
		}
	}

	return ~crc;
}
8653
8654 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8655 {
8656         /* accept or reject all multicast frames */
8657         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8658         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8659         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8660         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8661 }
8662
/* Program the RX filtering state (promiscuous flag, VLAN tag stripping,
 * multicast hash filter) from dev->flags and the multicast list.
 * Caller must hold the appropriate tg3 locks (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			/* The low 7 bits of the inverted CRC select one
			 * of 128 hash-filter bits: bits 6:5 pick the
			 * register, bits 4:0 the bit within it.
			 */
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware when the mode word actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8726
/* net_device entry point for RX filtering changes.  Does nothing while
 * the interface is down; otherwise takes the full device lock around
 * the actual register programming in __tg3_set_rx_mode().
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8738
8739 #define TG3_REGDUMP_LEN         (32 * 1024)
8740
/* ethtool: size in bytes of the register dump produced by tg3_get_regs(). */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8745
/* ethtool get_regs: dump device registers into the caller's buffer at
 * their native register offsets; gaps between the dumped ranges remain
 * zero-filled.  Skips reading entirely while the PHY is in low-power
 * mode (registers are not safely accessible then), leaving the buffer
 * all zeroes.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helpers: position the output cursor at the register's own offset
 * within the dump buffer, then copy one register (or a byte range of
 * registers) via tr32().
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8818
/* ethtool: size in bytes of the NVRAM image exposed via get_eeprom. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8825
8826 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8827 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8828 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8829
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in aligned 4-byte words,
 * so an unaligned head and tail are each handled by reading the whole
 * word and copying out only the wanted bytes.  Returns 0 or a negative
 * errno; on a partial failure eeprom->len reflects the bytes copied so
 * far.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes successfully copied so far. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8889
8890 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8891
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset into
 * NVRAM.  NVRAM writes must be whole, aligned 4-byte words, so an
 * unaligned head and/or tail is handled read-modify-write style: the
 * bordering words are read first and merged with the caller's data in
 * a scratch buffer before the block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge caller data with the preserved border word(s). */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8949
8950 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8951 {
8952         struct tg3 *tp = netdev_priv(dev);
8953
8954         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8955                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8956                         return -EAGAIN;
8957                 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8958         }
8959
8960         cmd->supported = (SUPPORTED_Autoneg);
8961
8962         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8963                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8964                                    SUPPORTED_1000baseT_Full);
8965
8966         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8967                 cmd->supported |= (SUPPORTED_100baseT_Half |
8968                                   SUPPORTED_100baseT_Full |
8969                                   SUPPORTED_10baseT_Half |
8970                                   SUPPORTED_10baseT_Full |
8971                                   SUPPORTED_TP);
8972                 cmd->port = PORT_TP;
8973         } else {
8974                 cmd->supported |= SUPPORTED_FIBRE;
8975                 cmd->port = PORT_FIBRE;
8976         }
8977
8978         cmd->advertising = tp->link_config.advertising;
8979         if (netif_running(dev)) {
8980                 cmd->speed = tp->link_config.active_speed;
8981                 cmd->duplex = tp->link_config.active_duplex;
8982         }
8983         cmd->phy_address = PHY_ADDR;
8984         cmd->transceiver = 0;
8985         cmd->autoneg = tp->link_config.autoneg;
8986         cmd->maxtxpkt = 0;
8987         cmd->maxrxpkt = 0;
8988         return 0;
8989 }
8990
/* ethtool set_settings: validate and apply a new link configuration.
 * Deferred to phylib when it owns the PHY; otherwise the request is
 * validated against the media type (serdes/fiber vs copper) and then
 * written into link_config before re-running PHY setup.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: record advertised modes, leave speed/duplex
		 * to be negotiated.
		 */
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		/* Forced mode: pin speed/duplex, advertise nothing. */
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration so it can be restored
	 * after power-management transitions.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9046
9047 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9048 {
9049         struct tg3 *tp = netdev_priv(dev);
9050
9051         strcpy(info->driver, DRV_MODULE_NAME);
9052         strcpy(info->version, DRV_MODULE_VERSION);
9053         strcpy(info->fw_version, tp->fw_ver);
9054         strcpy(info->bus_info, pci_name(tp->pdev));
9055 }
9056
9057 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9058 {
9059         struct tg3 *tp = netdev_priv(dev);
9060
9061         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9062             device_can_wakeup(&tp->pdev->dev))
9063                 wol->supported = WAKE_MAGIC;
9064         else
9065                 wol->supported = 0;
9066         wol->wolopts = 0;
9067         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9068                 wol->wolopts = WAKE_MAGIC;
9069         memset(&wol->sopass, 0, sizeof(wol->sopass));
9070 }
9071
9072 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9073 {
9074         struct tg3 *tp = netdev_priv(dev);
9075         struct device *dp = &tp->pdev->dev;
9076
9077         if (wol->wolopts & ~WAKE_MAGIC)
9078                 return -EINVAL;
9079         if ((wol->wolopts & WAKE_MAGIC) &&
9080             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9081                 return -EINVAL;
9082
9083         spin_lock_bh(&tp->lock);
9084         if (wol->wolopts & WAKE_MAGIC) {
9085                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9086                 device_set_wakeup_enable(dp, true);
9087         } else {
9088                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9089                 device_set_wakeup_enable(dp, false);
9090         }
9091         spin_unlock_bh(&tp->lock);
9092
9093         return 0;
9094 }
9095
/* ethtool: return the driver's current message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
9101
/* ethtool: set the driver's message-enable bitmask used for logging. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
9107
/* ethtool set_tso: enable/disable TCP segmentation offload.  Rejects
 * enabling on non-TSO-capable parts; on HW_TSO_2 chips (other than the
 * 5906) also toggles TSO6, and TSO_ECN on the chip revisions that
 * support it.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		/* Hardware can't do TSO: disabling is a no-op,
		 * enabling is an error.
		 */
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			/* TSO over ECN-marked flows only works on these
			 * chip revisions.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9131
/* ethtool nway_reset: restart link autonegotiation.  Not valid for
 * serdes PHYs or while the interface is down.  Uses phylib when it
 * owns the PHY; otherwise pokes MII_BMCR directly.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is deliberately read twice; the first
		 * read's result is discarded — presumably to flush latched
		 * state before the read that is actually checked.  Confirm
		 * against PHY errata before "simplifying" this.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9165
9166 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9167 {
9168         struct tg3 *tp = netdev_priv(dev);
9169
9170         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9171         ering->rx_mini_max_pending = 0;
9172         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9173                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9174         else
9175                 ering->rx_jumbo_max_pending = 0;
9176
9177         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9178
9179         ering->rx_pending = tp->rx_pending;
9180         ering->rx_mini_pending = 0;
9181         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9182                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9183         else
9184                 ering->rx_jumbo_pending = 0;
9185
9186         ering->tx_pending = tp->tx_pending;
9187 }
9188
/* ethtool set_ringparam: validate and apply new ring sizes, halting
 * and restarting the hardware if the interface is running.  The TX
 * ring must be large enough for a maximally-fragmented skb (3x on
 * chips with the TSO bug, which re-segments in the driver).
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce PHY and traffic before reconfiguring rings. */
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it above and the HW
	 * restart succeeded.
	 */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9232
9233 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9234 {
9235         struct tg3 *tp = netdev_priv(dev);
9236
9237         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9238
9239         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9240                 epause->rx_pause = 1;
9241         else
9242                 epause->rx_pause = 0;
9243
9244         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9245                 epause->tx_pause = 1;
9246         else
9247                 epause->tx_pause = 0;
9248 }
9249
/* ethtool set_pauseparam: configure RX/TX flow control.  Two distinct
 * paths: with phylib, translate the request into pause advertisement
 * bits and re-run autonegotiation if they changed; without phylib,
 * update the driver flags and restart the hardware so the new setting
 * takes effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus.phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto the standard
			 * Pause/Asym_Pause advertisement combination.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only restart autoneg if the pause bits
				 * actually changed.
				 */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		/* Restart the hardware so the new flow-control setting
		 * is picked up.
		 */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9342
/* ethtool: report whether RX checksum offload is currently enabled. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
9348
9349 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9350 {
9351         struct tg3 *tp = netdev_priv(dev);
9352
9353         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9354                 if (data != 0)
9355                         return -EINVAL;
9356                 return 0;
9357         }
9358
9359         spin_lock_bh(&tp->lock);
9360         if (data)
9361                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9362         else
9363                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9364         spin_unlock_bh(&tp->lock);
9365
9366         return 0;
9367 }
9368
9369 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9370 {
9371         struct tg3 *tp = netdev_priv(dev);
9372
9373         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9374                 if (data != 0)
9375                         return -EINVAL;
9376                 return 0;
9377         }
9378
9379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9380             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9381             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9384                 ethtool_op_set_tx_ipv6_csum(dev, data);
9385         else
9386                 ethtool_op_set_tx_csum(dev, data);
9387
9388         return 0;
9389 }
9390
9391 static int tg3_get_sset_count (struct net_device *dev, int sset)
9392 {
9393         switch (sset) {
9394         case ETH_SS_TEST:
9395                 return TG3_NUM_TEST;
9396         case ETH_SS_STATS:
9397                 return TG3_NUM_STATS;
9398         default:
9399                 return -EOPNOTSUPP;
9400         }
9401 }
9402
9403 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9404 {
9405         switch (stringset) {
9406         case ETH_SS_STATS:
9407                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9408                 break;
9409         case ETH_SS_TEST:
9410                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9411                 break;
9412         default:
9413                 WARN_ON(1);     /* we need a WARN() */
9414                 break;
9415         }
9416 }
9417
9418 static int tg3_phys_id(struct net_device *dev, u32 data)
9419 {
9420         struct tg3 *tp = netdev_priv(dev);
9421         int i;
9422
9423         if (!netif_running(tp->dev))
9424                 return -EAGAIN;
9425
9426         if (data == 0)
9427                 data = UINT_MAX / 2;
9428
9429         for (i = 0; i < (data * 2); i++) {
9430                 if ((i % 2) == 0)
9431                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9432                                            LED_CTRL_1000MBPS_ON |
9433                                            LED_CTRL_100MBPS_ON |
9434                                            LED_CTRL_10MBPS_ON |
9435                                            LED_CTRL_TRAFFIC_OVERRIDE |
9436                                            LED_CTRL_TRAFFIC_BLINK |
9437                                            LED_CTRL_TRAFFIC_LED);
9438
9439                 else
9440                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9441                                            LED_CTRL_TRAFFIC_OVERRIDE);
9442
9443                 if (msleep_interruptible(500))
9444                         break;
9445         }
9446         tw32(MAC_LED_CTRL, tp->led_ctrl);
9447         return 0;
9448 }
9449
/* ethtool: copy the driver-maintained 64-bit statistics block into the
 * caller's buffer (refreshed via tg3_get_estats()).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9456
9457 #define NVRAM_TEST_SIZE 0x100
9458 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9459 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9460 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9461 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9462 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9463
/* tg3_test_nvram - verify the integrity of the NVRAM contents.
 *
 * Reads the magic word at offset 0 to classify the image (plain EEPROM,
 * selfboot firmware format 1, or selfboot hardware format), reads the
 * whole image into a temporary buffer, and validates the format-specific
 * checksum or parity scheme.
 *
 * Returns 0 if the image validates (or is a selfboot revision this code
 * does not know how to check), -ENOMEM on allocation failure, and -EIO
 * on read failure, unknown magic, or checksum/parity mismatch.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic;
        __le32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Pick the number of bytes to read based on the image format. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        default:
                                /* Unknown selfboot revision - nothing we
                                 * can validate, so report success.
                                 */
                                return 0;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the whole image into buf, one 32-bit word at a time. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = swab32(le32_to_cpu(buf[0]));
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                /* 8-bit additive checksum over the image must sum to zero. */
                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Bytes 0 and 8 each carry 7 parity bits. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                /* Byte 16 carries 6 parity bits and byte 17
                                 * the remaining 8.
                                 */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its parity bit must have odd overall
                 * parity: odd data weight requires a clear parity bit, even
                 * data weight requires a set one.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Plain EEPROM image: two CRC-protected regions. */
        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
9597
9598 #define TG3_SERDES_TIMEOUT_SEC  2
9599 #define TG3_COPPER_TIMEOUT_SEC  6
9600
9601 static int tg3_test_link(struct tg3 *tp)
9602 {
9603         int i, max;
9604
9605         if (!netif_running(tp->dev))
9606                 return -ENODEV;
9607
9608         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9609                 max = TG3_SERDES_TIMEOUT_SEC;
9610         else
9611                 max = TG3_COPPER_TIMEOUT_SEC;
9612
9613         for (i = 0; i < max; i++) {
9614                 if (netif_carrier_ok(tp->dev))
9615                         return 0;
9616
9617                 if (msleep_interruptible(1000))
9618                         break;
9619         }
9620
9621         return -EIO;
9622 }
9623
/* Only test the commonly used registers.  For each applicable table
 * entry the register is probed twice: once with zero written and once
 * with all maskable bits written as ones.  In both cases the read-only
 * bits (read_mask) must be unchanged and the read/write bits
 * (write_mask) must read back exactly what was written.  The original
 * register value is restored afterwards.  Returns 0 on success, -EIO
 * on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;          /* chip-applicability flags below */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;      /* bits expected to be read-only */
                u32 write_mask;     /* bits expected to be read/write */
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* terminator */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                is_5705 = 1;
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                /* Restore the register's original content. */
                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                printk(KERN_ERR PFX "Register test failed at offset %x\n",
                       offset);
        tw32(offset, save_val);
        return -EIO;
}
9844
9845 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9846 {
9847         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9848         int i;
9849         u32 j;
9850
9851         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9852                 for (j = 0; j < len; j += 4) {
9853                         u32 val;
9854
9855                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9856                         tg3_read_mem(tp, offset + j, &val);
9857                         if (val != test_pattern[i])
9858                                 return -EIO;
9859                 }
9860         }
9861         return 0;
9862 }
9863
9864 static int tg3_test_memory(struct tg3 *tp)
9865 {
9866         static struct mem_entry {
9867                 u32 offset;
9868                 u32 len;
9869         } mem_tbl_570x[] = {
9870                 { 0x00000000, 0x00b50},
9871                 { 0x00002000, 0x1c000},
9872                 { 0xffffffff, 0x00000}
9873         }, mem_tbl_5705[] = {
9874                 { 0x00000100, 0x0000c},
9875                 { 0x00000200, 0x00008},
9876                 { 0x00004000, 0x00800},
9877                 { 0x00006000, 0x01000},
9878                 { 0x00008000, 0x02000},
9879                 { 0x00010000, 0x0e000},
9880                 { 0xffffffff, 0x00000}
9881         }, mem_tbl_5755[] = {
9882                 { 0x00000200, 0x00008},
9883                 { 0x00004000, 0x00800},
9884                 { 0x00006000, 0x00800},
9885                 { 0x00008000, 0x02000},
9886                 { 0x00010000, 0x0c000},
9887                 { 0xffffffff, 0x00000}
9888         }, mem_tbl_5906[] = {
9889                 { 0x00000200, 0x00008},
9890                 { 0x00004000, 0x00400},
9891                 { 0x00006000, 0x00400},
9892                 { 0x00008000, 0x01000},
9893                 { 0x00010000, 0x01000},
9894                 { 0xffffffff, 0x00000}
9895         };
9896         struct mem_entry *mem_tbl;
9897         int err = 0;
9898         int i;
9899
9900         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9901                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9902                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9903                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9904                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9906                         mem_tbl = mem_tbl_5755;
9907                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9908                         mem_tbl = mem_tbl_5906;
9909                 else
9910                         mem_tbl = mem_tbl_5705;
9911         } else
9912                 mem_tbl = mem_tbl_570x;
9913
9914         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9915                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9916                     mem_tbl[i].len)) != 0)
9917                         break;
9918         }
9919
9920         return err;
9921 }
9922
9923 #define TG3_MAC_LOOPBACK        0
9924 #define TG3_PHY_LOOPBACK        1
9925
/* tg3_run_loopback - send one self-addressed frame through an internal
 * loopback path and verify it is received intact.
 *
 * loopback_mode selects MAC-internal loopback (TG3_MAC_LOOPBACK) or PHY
 * loopback (TG3_PHY_LOOPBACK).  Returns 0 on success, -EIO on a
 * send/receive or data-compare failure, -ENOMEM if the test skb cannot
 * be allocated, -EINVAL for an unknown mode.  The caller is expected to
 * have the hardware initialized and quiesced (see tg3_test_loopback).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                /* Clear bit 5 of register 0x1b through the
                                 * EPHY shadow-access window, restoring the
                                 * test register afterwards.
                                 */
                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build a 1514-byte frame addressed to ourselves with a
         * recognizable byte pattern (i & 0xff) in the payload.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        /* Snapshot the rx producer so we can detect the looped frame. */
        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        /* Queue the frame on the send ring and kick the hardware. */
        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been consumed on tx and appeared on rx. */
        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Validate the receive descriptor: standard ring, no errors
         * (except the benign odd-nibble MII indication), correct length.
         */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Compare the received payload byte-for-byte with what was sent. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
10093
10094 #define TG3_MAC_LOOPBACK_FAILED         1
10095 #define TG3_PHY_LOOPBACK_FAILED         2
10096 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10097                                          TG3_PHY_LOOPBACK_FAILED)
10098
/* tg3_test_loopback - run the MAC loopback test and, for devices with a
 * PHY not driven through phylib, the PHY loopback test.
 *
 * On 5784/5761/5785 the CPMU hardware mutex must be acquired and
 * link-based power management disabled around the MAC loopback test,
 * then restored.  Returns 0 on success, or a bitmask of
 * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED (both set via
 * TG3_LOOPBACK_FAILED when the device is down or cannot be reset).
 */
static int tg3_test_loopback(struct tg3 *tp)
{
        int err = 0;
        u32 cpmuctrl = 0;

        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;

        err = tg3_reset_hw(tp, 1);
        if (err)
                return TG3_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                int i;
                u32 status;

                tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

                /* Wait for up to 40 microseconds to acquire lock. */
                for (i = 0; i < 4; i++) {
                        status = tr32(TG3_CPMU_MUTEX_GNT);
                        if (status == CPMU_MUTEX_GNT_DRIVER)
                                break;
                        udelay(10);
                }

                if (status != CPMU_MUTEX_GNT_DRIVER)
                        return TG3_LOOPBACK_FAILED;

                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                tw32(TG3_CPMU_CTRL,
                     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }

        if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
                err |= TG3_MAC_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                /* Restore the CPMU control value saved above. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);

                /* Release the mutex */
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }

        return err;
}
10157
/* tg3_self_test - ethtool self-test handler.
 *
 * Always runs the NVRAM (data[0]) and link (data[1]) tests.  When
 * ETH_TEST_FL_OFFLINE is requested, additionally halts the device and
 * runs the register (data[2]), memory (data[3]), loopback (data[4])
 * and interrupt (data[5]) tests, then restarts the hardware.  A
 * non-zero data[] entry marks that test failed, and ETH_TEST_FL_FAILED
 * is set in etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Temporarily bring the chip to full power for the tests. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Quiesce the chip before the destructive offline tests. */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test runs without the full lock held. */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Shut down and, if the device was up, bring it back. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
10235
/* tg3_ioctl - net-device ioctl handler for MII register access
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * When the device is driven through phylib the request is forwarded to
 * the attached PHY driver.  Returns 0 or a negative errno; commands not
 * handled here return -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                        return -EAGAIN;
                return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
        }

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = PHY_ADDR;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                /* MII access is not possible while powered down. */
                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                /* Register writes require net-admin capability. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
10293
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * RX mode so the KEEP_VLAN_TAG bit matches.  The NIC is quiesced
 * (tg3_netif_stop / tg3_full_lock) around the update.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
10315
10316 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10317 {
10318         struct tg3 *tp = netdev_priv(dev);
10319
10320         memcpy(ec, &tp->coal, sizeof(*ec));
10321         return 0;
10322 }
10323
10324 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10325 {
10326         struct tg3 *tp = netdev_priv(dev);
10327         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10328         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10329
10330         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10331                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10332                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10333                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10334                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10335         }
10336
10337         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10338             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10339             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10340             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10341             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10342             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10343             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10344             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10345             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10346             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10347                 return -EINVAL;
10348
10349         /* No rx interrupts will be generated if both are zero */
10350         if ((ec->rx_coalesce_usecs == 0) &&
10351             (ec->rx_max_coalesced_frames == 0))
10352                 return -EINVAL;
10353
10354         /* No tx interrupts will be generated if both are zero */
10355         if ((ec->tx_coalesce_usecs == 0) &&
10356             (ec->tx_max_coalesced_frames == 0))
10357                 return -EINVAL;
10358
10359         /* Only copy relevant parameters, ignore all others. */
10360         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10361         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10362         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10363         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10364         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10365         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10366         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10367         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10368         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10369
10370         if (netif_running(dev)) {
10371                 tg3_full_lock(tp, 0);
10372                 __tg3_set_coalesce(tp, &tp->coal);
10373                 tg3_full_unlock(tp);
10374         }
10375         return 0;
10376 }
10377
/* ethtool operations table for tg3 devices; registered via
 * dev->ethtool_ops at probe time.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10410
10411 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10412 {
10413         u32 cursize, val, magic;
10414
10415         tp->nvram_size = EEPROM_CHIP_SIZE;
10416
10417         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10418                 return;
10419
10420         if ((magic != TG3_EEPROM_MAGIC) &&
10421             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10422             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10423                 return;
10424
10425         /*
10426          * Size the chip by reading offsets at increasing powers of two.
10427          * When we encounter our validation signature, we know the addressing
10428          * has wrapped around, and thus have our chip size.
10429          */
10430         cursize = 0x10;
10431
10432         while (cursize < tp->nvram_size) {
10433                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10434                         return;
10435
10436                 if (val == magic)
10437                         break;
10438
10439                 cursize <<= 1;
10440         }
10441
10442         tp->nvram_size = cursize;
10443 }
10444
10445 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10446 {
10447         u32 val;
10448
10449         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10450                 return;
10451
10452         /* Selfboot format */
10453         if (val != TG3_EEPROM_MAGIC) {
10454                 tg3_get_eeprom_size(tp);
10455                 return;
10456         }
10457
10458         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10459                 if (val != 0) {
10460                         tp->nvram_size = (val >> 16) * 1024;
10461                         return;
10462                 }
10463         }
10464         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10465 }
10466
/* Decode NVRAM_CFG1 for 5700-era chips: determine whether a flash part
 * or a SEEPROM is attached, and record vendor, page size, and buffered
 * mode in tp->nvram_* / tp->tg3_flags*.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* SEEPROM attached: clear compat bypass so accesses go
		 * through the buffered interface. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* Vendor decoding below only applies to 5750 / 5780-class parts. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Older parts: assume a buffered ATMEL AT45DB0X1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
10519
/* Decode NVRAM_CFG1 on 5752 chips: record vendor/buffering, and for
 * flash parts pull the page size out of the config register; SEEPROMs
 * instead get compat bypass cleared and a chip-sized "page".
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash part: page size is encoded in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10580
/* Decode NVRAM_CFG1 on 5755-class chips: record vendor, page size,
 * and total size.  With the TPM protection window active the usable
 * size is reduced (e.g. 0x3e200 instead of a full 512KB).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 :
						  TG3_NVRAM_SIZE_512KB);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 :
						  TG3_NVRAM_SIZE_256KB);
			else
				tp->nvram_size = (protect ? 0x1f200 :
						  TG3_NVRAM_SIZE_128KB);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_64KB :
						  TG3_NVRAM_SIZE_128KB);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_64KB :
						  TG3_NVRAM_SIZE_256KB);
			else
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_128KB :
						  TG3_NVRAM_SIZE_512KB);
			break;
	}
}
10636
/* Decode NVRAM_CFG1 on 5787/5784/5785 chips: record vendor, page
 * size, and flash/EEPROM mode.  EEPROM parts additionally get compat
 * bypass cleared so buffered accesses work.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
10674
/* Decode NVRAM_CFG1 on 5761 chips: record vendor and page size, then
 * derive the total size.  When TPM protection is active the usable
 * size comes from the NVRAM_ADDR_LOCKOUT register; otherwise it is
 * implied by the exact device ID.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			/* These parts take linear addresses directly. */
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = TG3_NVRAM_SIZE_2MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = TG3_NVRAM_SIZE_1MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = TG3_NVRAM_SIZE_512KB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = TG3_NVRAM_SIZE_256KB;
				break;
		}
	}
}
10749
10750 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10751 {
10752         tp->nvram_jedecnum = JEDEC_ATMEL;
10753         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10754         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10755 }
10756
10757 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10758 static void __devinit tg3_nvram_init(struct tg3 *tp)
10759 {
10760         tw32_f(GRC_EEPROM_ADDR,
10761              (EEPROM_ADDR_FSM_RESET |
10762               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10763                EEPROM_ADDR_CLKPERD_SHIFT)));
10764
10765         msleep(1);
10766
10767         /* Enable seeprom accesses. */
10768         tw32_f(GRC_LOCAL_CTRL,
10769              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10770         udelay(100);
10771
10772         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10773             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10774                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10775
10776                 if (tg3_nvram_lock(tp)) {
10777                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10778                                "tg3_nvram_init failed.\n", tp->dev->name);
10779                         return;
10780                 }
10781                 tg3_enable_nvram_access(tp);
10782
10783                 tp->nvram_size = 0;
10784
10785                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10786                         tg3_get_5752_nvram_info(tp);
10787                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10788                         tg3_get_5755_nvram_info(tp);
10789                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10790                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10791                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10792                         tg3_get_5787_nvram_info(tp);
10793                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10794                         tg3_get_5761_nvram_info(tp);
10795                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10796                         tg3_get_5906_nvram_info(tp);
10797                 else
10798                         tg3_get_nvram_info(tp);
10799
10800                 if (tp->nvram_size == 0)
10801                         tg3_get_nvram_size(tp);
10802
10803                 tg3_disable_nvram_access(tp);
10804                 tg3_nvram_unlock(tp);
10805
10806         } else {
10807                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10808
10809                 tg3_get_eeprom_size(tp);
10810         }
10811 }
10812
/* Read one 32-bit word from a SEEPROM through the GRC_EEPROM_ADDR /
 * GRC_EEPROM_DATA registers.  @offset must be dword-aligned and within
 * EEPROM_ADDR_ADDR_MASK.  Returns 0 on success, -EINVAL for a bad
 * offset, or -EBUSY if the access never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the other GRC_EEPROM_ADDR bits, clearing only the
	 * address, device-id and read fields we are about to set. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10846
10847 #define NVRAM_CMD_TIMEOUT 10000
10848
10849 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10850 {
10851         int i;
10852
10853         tw32(NVRAM_CMD, nvram_cmd);
10854         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10855                 udelay(10);
10856                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10857                         udelay(10);
10858                         break;
10859                 }
10860         }
10861         if (i == NVRAM_CMD_TIMEOUT) {
10862                 return -EBUSY;
10863         }
10864         return 0;
10865 }
10866
10867 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10868 {
10869         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10870             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10871             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10872            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10873             (tp->nvram_jedecnum == JEDEC_ATMEL))
10874
10875                 addr = ((addr / tp->nvram_pagesize) <<
10876                         ATMEL_AT45DB0X1B_PAGE_POS) +
10877                        (addr % tp->nvram_pagesize);
10878
10879         return addr;
10880 }
10881
10882 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10883 {
10884         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10885             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10886             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10887            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10888             (tp->nvram_jedecnum == JEDEC_ATMEL))
10889
10890                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10891                         tp->nvram_pagesize) +
10892                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10893
10894         return addr;
10895 }
10896
/* Read one 32-bit word from NVRAM at logical byte @offset.  Falls back
 * to the SEEPROM path when no NVRAM interface is present.  The word
 * read from NVRAM_RDDATA is byte-swapped before being stored in *val.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Convert to the device's physical addressing scheme. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10928
10929 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10930 {
10931         u32 v;
10932         int res = tg3_nvram_read(tp, offset, &v);
10933         if (!res)
10934                 *val = cpu_to_le32(v);
10935         return res;
10936 }
10937
10938 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10939 {
10940         int err;
10941         u32 tmp;
10942
10943         err = tg3_nvram_read(tp, offset, &tmp);
10944         *val = swab32(tmp);
10945         return err;
10946 }
10947
/* Write @len bytes from @buf to a SEEPROM starting at @offset, one
 * 32-bit word at a time through the GRC_EEPROM_ADDR/DATA registers.
 * Assumes dword-aligned offset and length (see caller contract in the
 * "offset and length are dword aligned" write paths).  Returns 0 on
 * success or -EBUSY if a word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* Clear any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10990
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page.  Unbuffered parts
	 * are programmed a whole page at a time, so each page is handled
	 * as read / modify / erase / rewrite.
	 */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Page-aligned base of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read back the current contents of the whole page. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into the page image; 'size' is
		 * clamped so the final partial page is handled too.
		 */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		/* NOTE(review): a failed exec_cmd on the WREN/ERASE steps
		 * breaks out of the loop with ret still 0 — the caller sees
		 * success; confirm whether that is intentional.
		 */
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back one dword at a time,
		 * flagging the first and last dwords of the page.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write disable before returning, regardless of result. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
11087
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* buf holds big-endian dwords; the data register takes
		 * the value in CPU order.
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark page boundaries (and the very first/last dword of
		 * the transfer) so the controller can buffer a full page
		 * per program operation.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-flavor parts — except on the listed ASIC revs —
		 * need an explicit write-enable command ahead of the
		 * first dword of each page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11142
11143 /* offset and length are dword aligned */
11144 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11145 {
11146         int ret;
11147
11148         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11149                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11150                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11151                 udelay(40);
11152         }
11153
11154         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11155                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11156         }
11157         else {
11158                 u32 grc_mode;
11159
11160                 ret = tg3_nvram_lock(tp);
11161                 if (ret)
11162                         return ret;
11163
11164                 tg3_enable_nvram_access(tp);
11165                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11166                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11167                         tw32(NVRAM_WRITE1, 0x406);
11168
11169                 grc_mode = tr32(GRC_MODE);
11170                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11171
11172                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11173                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11174
11175                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11176                                 buf);
11177                 }
11178                 else {
11179                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11180                                 buf);
11181                 }
11182
11183                 grc_mode = tr32(GRC_MODE);
11184                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11185
11186                 tg3_disable_nvram_access(tp);
11187                 tg3_nvram_unlock(tp);
11188         }
11189
11190         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11191                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11192                 udelay(40);
11193         }
11194
11195         return ret;
11196 }
11197
/* Maps a PCI subsystem vendor/device ID pair to the PHY ID expected on
 * that board design.  Consulted when no usable PHY ID is found elsewhere
 * (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* 0 => serdes board; selects TG3_FLG2_PHY_SERDES */
};
11202
/* Known board designs keyed by PCI subsystem IDs.  Entries with a
 * phy_id of 0 are serdes/fiber designs with no copper PHY.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11240
11241 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11242 {
11243         int i;
11244
11245         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11246                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11247                      tp->pdev->subsystem_vendor) &&
11248                     (subsys_id_to_phy_id[i].subsys_devid ==
11249                      tp->pdev->subsystem_device))
11250                         return &subsys_id_to_phy_id[i];
11251         }
11252         return NULL;
11253 }
11254
/* Pull the hardware configuration left by the bootcode in NIC SRAM
 * (PHY id, LED mode, WOL/ASF/APE related flags, ...) into *tp.
 * Runs at probe time; only modifies driver state, never the link.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906: configuration comes from the VCPU shadow register
	 * instead of NIC SRAM; handle it and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	/* Everything below depends on a valid SRAM signature; without
	 * it the defaults set above stand.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read on newer ASICs with bootcode
		 * version in (0, 0x100).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY id into the driver's
		 * internal PHY id layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards force PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Two specific Arima boards are excepted from
			 * the write-protect bit.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		/* cfg4 is only non-zero on 5785 (read above). */
		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
}
11457
11458 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11459 {
11460         int i;
11461         u32 val;
11462
11463         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11464         tw32(OTP_CTRL, cmd);
11465
11466         /* Wait for up to 1 ms for command to execute. */
11467         for (i = 0; i < 100; i++) {
11468                 val = tr32(OTP_STATUS);
11469                 if (val & OTP_STATUS_CMD_DONE)
11470                         break;
11471                 udelay(10);
11472         }
11473
11474         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11475 }
11476
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns 0 if any OTP command fails; callers must treat 0 as
 * "no valid OTP configuration".
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: the dword whose low half is the top of the value. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: the dword whose high half is the bottom of it. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11506
/* Identify the PHY attached to this device and record it in tp->phy_id.
 * Order of preference: phylib (if enabled), the PHY's own MII ID
 * registers, the id left in tp->phy_id by tg3_get_eeprom_hw_cfg(),
 * then the hard-coded subsystem-ID table.  For copper PHYs it also
 * makes sure autonegotiation advertises the supported modes.
 * Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII id registers into the driver's
		 * internal PHY id layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY not managed by ASF/APE firmware: reset it and
	 * set up autonegotiation advertisement, unless link is
	 * already up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched; read twice for the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs also force master mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): these advertisement writes repeat the ones
		 * above unconditionally — confirm whether the second write
		 * after tg3_phy_set_wirespeed() is required by the hardware.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	/* BCM5401 needs its DSP coefficients (re)loaded. */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11637
11638 static void __devinit tg3_read_partno(struct tg3 *tp)
11639 {
11640         unsigned char vpd_data[256];
11641         unsigned int i;
11642         u32 magic;
11643
11644         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11645                 goto out_not_found;
11646
11647         if (magic == TG3_EEPROM_MAGIC) {
11648                 for (i = 0; i < 256; i += 4) {
11649                         u32 tmp;
11650
11651                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11652                                 goto out_not_found;
11653
11654                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11655                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11656                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11657                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11658                 }
11659         } else {
11660                 int vpd_cap;
11661
11662                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11663                 for (i = 0; i < 256; i += 4) {
11664                         u32 tmp, j = 0;
11665                         __le32 v;
11666                         u16 tmp16;
11667
11668                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11669                                               i);
11670                         while (j++ < 100) {
11671                                 pci_read_config_word(tp->pdev, vpd_cap +
11672                                                      PCI_VPD_ADDR, &tmp16);
11673                                 if (tmp16 & 0x8000)
11674                                         break;
11675                                 msleep(1);
11676                         }
11677                         if (!(tmp16 & 0x8000))
11678                                 goto out_not_found;
11679
11680                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11681                                               &tmp);
11682                         v = cpu_to_le32(tmp);
11683                         memcpy(&vpd_data[i], &v, 4);
11684                 }
11685         }
11686
11687         /* Now parse and find the part number. */
11688         for (i = 0; i < 254; ) {
11689                 unsigned char val = vpd_data[i];
11690                 unsigned int block_end;
11691
11692                 if (val == 0x82 || val == 0x91) {
11693                         i = (i + 3 +
11694                              (vpd_data[i + 1] +
11695                               (vpd_data[i + 2] << 8)));
11696                         continue;
11697                 }
11698
11699                 if (val != 0x90)
11700                         goto out_not_found;
11701
11702                 block_end = (i + 3 +
11703                              (vpd_data[i + 1] +
11704                               (vpd_data[i + 2] << 8)));
11705                 i += 3;
11706
11707                 if (block_end > 256)
11708                         goto out_not_found;
11709
11710                 while (i < (block_end - 2)) {
11711                         if (vpd_data[i + 0] == 'P' &&
11712                             vpd_data[i + 1] == 'N') {
11713                                 int partno_len = vpd_data[i + 2];
11714
11715                                 i += 3;
11716                                 if (partno_len > 24 || (partno_len + i) > 256)
11717                                         goto out_not_found;
11718
11719                                 memcpy(tp->board_part_number,
11720                                        &vpd_data[i], partno_len);
11721
11722                                 /* Success. */
11723                                 return;
11724                         }
11725                         i += 3 + vpd_data[i + 2];
11726                 }
11727
11728                 /* Part number not found. */
11729                 goto out_not_found;
11730         }
11731
11732 out_not_found:
11733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11734                 strcpy(tp->board_part_number, "BCM95906");
11735         else
11736                 strcpy(tp->board_part_number, "none");
11737 }
11738
11739 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11740 {
11741         u32 val;
11742
11743         if (tg3_nvram_read_swab(tp, offset, &val) ||
11744             (val & 0xfc000000) != 0x0c000000 ||
11745             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11746             val != 0)
11747                 return 0;
11748
11749         return 1;
11750 }
11751
/* Read the bootcode firmware version string from NVRAM into tp->fw_ver,
 * and, when ASF firmware is in use (and not superseded by the APE),
 * append the ASF initialization firmware version after a ", " separator.
 * Silently returns on any NVRAM read failure or invalid image.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Bail out unless NVRAM carries the expected magic signature. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the bootcode image offset, word 0x4 its
	 * load ("start") address.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word 8 of a valid image points at the version string. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image's load address, so
	 * rebase it to the image's NVRAM offset.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF version when ASF is enabled and the APE
	 * is not managing the device instead.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory looking for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed ASF load address; later chips
	 * store it in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Follow the directory entry to the ASF image, validate it,
	 * and fetch its version-string pointer (word 8).
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	/* Separate the bootcode and ASF version strings. */
	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Copy up to 16 bytes of ASF version, clamping the final
	 * chunk so we never write past tp->fw_ver[TG3_VER_SIZE - 1].
	 */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination of the combined version string. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11835
11836 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11837
11838 static int __devinit tg3_get_invariants(struct tg3 *tp)
11839 {
11840         static struct pci_device_id write_reorder_chipsets[] = {
11841                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11842                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11843                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11844                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11845                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11846                              PCI_DEVICE_ID_VIA_8385_0) },
11847                 { },
11848         };
11849         u32 misc_ctrl_reg;
11850         u32 cacheline_sz_reg;
11851         u32 pci_state_reg, grc_misc_cfg;
11852         u32 val;
11853         u16 pci_cmd;
11854         int err, pcie_cap;
11855
11856         /* Force memory write invalidate off.  If we leave it on,
11857          * then on 5700_BX chips we have to enable a workaround.
11858          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11859          * to match the cacheline size.  The Broadcom driver have this
11860          * workaround but turns MWI off all the times so never uses
11861          * it.  This seems to suggest that the workaround is insufficient.
11862          */
11863         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11864         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11865         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11866
11867         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11868          * has the register indirect write enable bit set before
11869          * we try to access any of the MMIO registers.  It is also
11870          * critical that the PCI-X hw workaround situation is decided
11871          * before that as well.
11872          */
11873         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11874                               &misc_ctrl_reg);
11875
11876         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11877                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11879                 u32 prod_id_asic_rev;
11880
11881                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11882                                       &prod_id_asic_rev);
11883                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11884         }
11885
11886         /* Wrong chip ID in 5752 A0. This code can be removed later
11887          * as A0 is not in production.
11888          */
11889         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11890                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11891
11892         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11893          * we need to disable memory and use config. cycles
11894          * only to access all registers. The 5702/03 chips
11895          * can mistakenly decode the special cycles from the
11896          * ICH chipsets as memory write cycles, causing corruption
11897          * of register and memory space. Only certain ICH bridges
11898          * will drive special cycles with non-zero data during the
11899          * address phase which can fall within the 5703's address
11900          * range. This is not an ICH bug as the PCI spec allows
11901          * non-zero address during special cycles. However, only
11902          * these ICH bridges are known to drive non-zero addresses
11903          * during special cycles.
11904          *
11905          * Since special cycles do not cross PCI bridges, we only
11906          * enable this workaround if the 5703 is on the secondary
11907          * bus of these ICH bridges.
11908          */
11909         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11910             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11911                 static struct tg3_dev_id {
11912                         u32     vendor;
11913                         u32     device;
11914                         u32     rev;
11915                 } ich_chipsets[] = {
11916                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11917                           PCI_ANY_ID },
11918                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11919                           PCI_ANY_ID },
11920                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11921                           0xa },
11922                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11923                           PCI_ANY_ID },
11924                         { },
11925                 };
11926                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11927                 struct pci_dev *bridge = NULL;
11928
11929                 while (pci_id->vendor != 0) {
11930                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11931                                                 bridge);
11932                         if (!bridge) {
11933                                 pci_id++;
11934                                 continue;
11935                         }
11936                         if (pci_id->rev != PCI_ANY_ID) {
11937                                 if (bridge->revision > pci_id->rev)
11938                                         continue;
11939                         }
11940                         if (bridge->subordinate &&
11941                             (bridge->subordinate->number ==
11942                              tp->pdev->bus->number)) {
11943
11944                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11945                                 pci_dev_put(bridge);
11946                                 break;
11947                         }
11948                 }
11949         }
11950
11951         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11952                 static struct tg3_dev_id {
11953                         u32     vendor;
11954                         u32     device;
11955                 } bridge_chipsets[] = {
11956                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11957                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11958                         { },
11959                 };
11960                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11961                 struct pci_dev *bridge = NULL;
11962
11963                 while (pci_id->vendor != 0) {
11964                         bridge = pci_get_device(pci_id->vendor,
11965                                                 pci_id->device,
11966                                                 bridge);
11967                         if (!bridge) {
11968                                 pci_id++;
11969                                 continue;
11970                         }
11971                         if (bridge->subordinate &&
11972                             (bridge->subordinate->number <=
11973                              tp->pdev->bus->number) &&
11974                             (bridge->subordinate->subordinate >=
11975                              tp->pdev->bus->number)) {
11976                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11977                                 pci_dev_put(bridge);
11978                                 break;
11979                         }
11980                 }
11981         }
11982
11983         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11984          * DMA addresses > 40-bit. This bridge may have other additional
11985          * 57xx devices behind it in some 4-port NIC designs for example.
11986          * Any tg3 device found behind the bridge will also need the 40-bit
11987          * DMA workaround.
11988          */
11989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11991                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11992                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11993                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11994         }
11995         else {
11996                 struct pci_dev *bridge = NULL;
11997
11998                 do {
11999                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12000                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12001                                                 bridge);
12002                         if (bridge && bridge->subordinate &&
12003                             (bridge->subordinate->number <=
12004                              tp->pdev->bus->number) &&
12005                             (bridge->subordinate->subordinate >=
12006                              tp->pdev->bus->number)) {
12007                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12008                                 pci_dev_put(bridge);
12009                                 break;
12010                         }
12011                 } while (bridge);
12012         }
12013
12014         /* Initialize misc host control in PCI block. */
12015         tp->misc_host_ctrl |= (misc_ctrl_reg &
12016                                MISC_HOST_CTRL_CHIPREV);
12017         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12018                                tp->misc_host_ctrl);
12019
12020         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12021                               &cacheline_sz_reg);
12022
12023         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12024         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12025         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12026         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12027
12028         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12029             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12030                 tp->pdev_peer = tg3_find_peer(tp);
12031
12032         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12033             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12037             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12039             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12040             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12041                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12042
12043         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12044             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12045                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12046
12047         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12048                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12049                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12050                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12051                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12052                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12053                      tp->pdev_peer == tp->pdev))
12054                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12055
12056                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12057                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12058                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12060                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12061                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12062                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12063                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12064                 } else {
12065                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12066                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12067                                 ASIC_REV_5750 &&
12068                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12069                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12070                 }
12071         }
12072
12073         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12074              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12075                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12076
12077         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12078         if (pcie_cap != 0) {
12079                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12080
12081                 pcie_set_readrq(tp->pdev, 4096);
12082
12083                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12084                         u16 lnkctl;
12085
12086                         pci_read_config_word(tp->pdev,
12087                                              pcie_cap + PCI_EXP_LNKCTL,
12088                                              &lnkctl);
12089                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12090                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12091                 }
12092         }
12093
12094         /* If we have an AMD 762 or VIA K8T800 chipset, write
12095          * reordering to the mailbox registers done by the host
12096          * controller can cause major troubles.  We read back from
12097          * every mailbox register write to force the writes to be
12098          * posted to the chip in order.
12099          */
12100         if (pci_dev_present(write_reorder_chipsets) &&
12101             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12102                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12103
12104         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12105             tp->pci_lat_timer < 64) {
12106                 tp->pci_lat_timer = 64;
12107
12108                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12109                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12110                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12111                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12112
12113                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12114                                        cacheline_sz_reg);
12115         }
12116
12117         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12118             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12119                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12120                 if (!tp->pcix_cap) {
12121                         printk(KERN_ERR PFX "Cannot find PCI-X "
12122                                             "capability, aborting.\n");
12123                         return -EIO;
12124                 }
12125         }
12126
12127         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12128                               &pci_state_reg);
12129
12130         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12131                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12132
12133                 /* If this is a 5700 BX chipset, and we are in PCI-X
12134                  * mode, enable register write workaround.
12135                  *
12136                  * The workaround is to use indirect register accesses
12137                  * for all chip writes not to mailbox registers.
12138                  */
12139                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12140                         u32 pm_reg;
12141
12142                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12143
12144                         /* The chip can have it's power management PCI config
12145                          * space registers clobbered due to this bug.
12146                          * So explicitly force the chip into D0 here.
12147                          */
12148                         pci_read_config_dword(tp->pdev,
12149                                               tp->pm_cap + PCI_PM_CTRL,
12150                                               &pm_reg);
12151                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12152                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12153                         pci_write_config_dword(tp->pdev,
12154                                                tp->pm_cap + PCI_PM_CTRL,
12155                                                pm_reg);
12156
12157                         /* Also, force SERR#/PERR# in PCI command. */
12158                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12159                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12160                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12161                 }
12162         }
12163
12164         /* 5700 BX chips need to have their TX producer index mailboxes
12165          * written twice to workaround a bug.
12166          */
12167         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12168                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12169
12170         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12171                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12172         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12173                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12174
12175         /* Chip-specific fixup from Broadcom driver */
12176         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12177             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12178                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12179                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12180         }
12181
12182         /* Default fast path register access methods */
12183         tp->read32 = tg3_read32;
12184         tp->write32 = tg3_write32;
12185         tp->read32_mbox = tg3_read32;
12186         tp->write32_mbox = tg3_write32;
12187         tp->write32_tx_mbox = tg3_write32;
12188         tp->write32_rx_mbox = tg3_write32;
12189
12190         /* Various workaround register access methods */
12191         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12192                 tp->write32 = tg3_write_indirect_reg32;
12193         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12194                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12195                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12196                 /*
12197                  * Back to back register writes can cause problems on these
12198                  * chips, the workaround is to read back all reg writes
12199                  * except those to mailbox regs.
12200                  *
12201                  * See tg3_write_indirect_reg32().
12202                  */
12203                 tp->write32 = tg3_write_flush_reg32;
12204         }
12205
12206
12207         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12208             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12209                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12210                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12211                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12212         }
12213
12214         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12215                 tp->read32 = tg3_read_indirect_reg32;
12216                 tp->write32 = tg3_write_indirect_reg32;
12217                 tp->read32_mbox = tg3_read_indirect_mbox;
12218                 tp->write32_mbox = tg3_write_indirect_mbox;
12219                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12220                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12221
12222                 iounmap(tp->regs);
12223                 tp->regs = NULL;
12224
12225                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12226                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12227                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12228         }
12229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12230                 tp->read32_mbox = tg3_read32_mbox_5906;
12231                 tp->write32_mbox = tg3_write32_mbox_5906;
12232                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12233                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12234         }
12235
12236         if (tp->write32 == tg3_write_indirect_reg32 ||
12237             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12238              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12239               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12240                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12241
12242         /* Get eeprom hw config before calling tg3_set_power_state().
12243          * In particular, the TG3_FLG2_IS_NIC flag must be
12244          * determined before calling tg3_set_power_state() so that
12245          * we know whether or not to switch out of Vaux power.
12246          * When the flag is set, it means that GPIO1 is used for eeprom
12247          * write protect and also implies that it is a LOM where GPIOs
12248          * are not used to switch power.
12249          */
12250         tg3_get_eeprom_hw_cfg(tp);
12251
12252         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12253                 /* Allow reads and writes to the
12254                  * APE register and memory space.
12255                  */
12256                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12257                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12258                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12259                                        pci_state_reg);
12260         }
12261
12262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12263             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12265                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12266
12267                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12268                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12269                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12270                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12271                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12272         }
12273
12274         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12275          * GPIO1 driven high will bring 5700's external PHY out of reset.
12276          * It is also used as eeprom write protect on LOMs.
12277          */
12278         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12279         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12280             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12281                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12282                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12283         /* Unused GPIO3 must be driven as output on 5752 because there
12284          * are no pull-up resistors on unused GPIO pins.
12285          */
12286         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12287                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12288
12289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12290                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12291
12292         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12293                 /* Turn off the debug UART. */
12294                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12295                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12296                         /* Keep VMain power. */
12297                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12298                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12299         }
12300
12301         /* Force the chip into D0. */
12302         err = tg3_set_power_state(tp, PCI_D0);
12303         if (err) {
12304                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12305                        pci_name(tp->pdev));
12306                 return err;
12307         }
12308
12309         /* 5700 B0 chips do not support checksumming correctly due
12310          * to hardware bugs.
12311          */
12312         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12313                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12314
12315         /* Derive initial jumbo mode from MTU assigned in
12316          * ether_setup() via the alloc_etherdev() call
12317          */
12318         if (tp->dev->mtu > ETH_DATA_LEN &&
12319             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12320                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12321
12322         /* Determine WakeOnLan speed to use. */
12323         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12324             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12325             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12326             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12327                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12328         } else {
12329                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12330         }
12331
12332         /* A few boards don't want Ethernet@WireSpeed phy feature */
12333         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12334             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12335              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12336              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12337             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12338             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12339                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12340
12341         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12342             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12343                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12344         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12345                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12346
12347         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12348                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12349                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12350                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12351                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12352                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12353                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12354                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12355                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12356                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12357                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12358                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12359                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12360         }
12361
12362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12363             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12364                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12365                 if (tp->phy_otp == 0)
12366                         tp->phy_otp = TG3_OTP_DEFAULT;
12367         }
12368
12369         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12370                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12371         else
12372                 tp->mi_mode = MAC_MI_MODE_BASE;
12373
12374         tp->coalesce_mode = 0;
12375         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12376             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12377                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12378
12379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12380                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12381
12382         err = tg3_mdio_init(tp);
12383         if (err)
12384                 return err;
12385
12386         /* Initialize data/descriptor byte/word swapping. */
12387         val = tr32(GRC_MODE);
12388         val &= GRC_MODE_HOST_STACKUP;
12389         tw32(GRC_MODE, val | tp->grc_mode);
12390
12391         tg3_switch_clocks(tp);
12392
12393         /* Clear this out for sanity. */
12394         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12395
12396         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12397                               &pci_state_reg);
12398         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12399             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12400                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12401
12402                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12403                     chiprevid == CHIPREV_ID_5701_B0 ||
12404                     chiprevid == CHIPREV_ID_5701_B2 ||
12405                     chiprevid == CHIPREV_ID_5701_B5) {
12406                         void __iomem *sram_base;
12407
12408                         /* Write some dummy words into the SRAM status block
12409                          * area, see if it reads back correctly.  If the return
12410                          * value is bad, force enable the PCIX workaround.
12411                          */
12412                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12413
12414                         writel(0x00000000, sram_base);
12415                         writel(0x00000000, sram_base + 4);
12416                         writel(0xffffffff, sram_base + 4);
12417                         if (readl(sram_base) != 0x00000000)
12418                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12419                 }
12420         }
12421
12422         udelay(50);
12423         tg3_nvram_init(tp);
12424
12425         grc_misc_cfg = tr32(GRC_MISC_CFG);
12426         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12427
12428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12429             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12430              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12431                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12432
12433         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12434             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12435                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12436         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12437                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12438                                       HOSTCC_MODE_CLRTICK_TXBD);
12439
12440                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12441                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12442                                        tp->misc_host_ctrl);
12443         }
12444
12445         /* Preserve the APE MAC_MODE bits */
12446         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12447                 tp->mac_mode = tr32(MAC_MODE) |
12448                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12449         else
12450                 tp->mac_mode = TG3_DEF_MAC_MODE;
12451
12452         /* these are limited to 10/100 only */
12453         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12454              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12455             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12456              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12457              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12458               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12459               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12460             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12461              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12462               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12463               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12465                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12466
12467         err = tg3_phy_probe(tp);
12468         if (err) {
12469                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12470                        pci_name(tp->pdev), err);
12471                 /* ... but do not return immediately ... */
12472                 tg3_mdio_fini(tp);
12473         }
12474
12475         tg3_read_partno(tp);
12476         tg3_read_fw_ver(tp);
12477
12478         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12479                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12480         } else {
12481                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12482                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12483                 else
12484                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12485         }
12486
12487         /* 5700 {AX,BX} chips have a broken status block link
12488          * change bit implementation, so we must use the
12489          * status register in those cases.
12490          */
12491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12492                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12493         else
12494                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12495
12496         /* The led_ctrl is set during tg3_phy_probe, here we might
12497          * have to force the link status polling mechanism based
12498          * upon subsystem IDs.
12499          */
12500         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12501             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12502             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12503                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12504                                   TG3_FLAG_USE_LINKCHG_REG);
12505         }
12506
12507         /* For all SERDES we poll the MAC status register. */
12508         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12509                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12510         else
12511                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12512
12513         /* All chips before 5787 can get confused if TX buffers
12514          * straddle the 4GB address boundary in some cases.
12515          */
12516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12519             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12522                 tp->dev->hard_start_xmit = tg3_start_xmit;
12523         else
12524                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12525
12526         tp->rx_offset = 2;
12527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12528             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12529                 tp->rx_offset = 0;
12530
12531         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12532
12533         /* Increment the rx prod index on the rx std ring by at most
12534          * 8 for these chips to workaround hw errata.
12535          */
12536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12537             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12539                 tp->rx_std_max_post = 8;
12540
12541         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12542                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12543                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12544
12545         return err;
12546 }
12547
12548 #ifdef CONFIG_SPARC
12549 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12550 {
12551         struct net_device *dev = tp->dev;
12552         struct pci_dev *pdev = tp->pdev;
12553         struct device_node *dp = pci_device_to_OF_node(pdev);
12554         const unsigned char *addr;
12555         int len;
12556
12557         addr = of_get_property(dp, "local-mac-address", &len);
12558         if (addr && len == 6) {
12559                 memcpy(dev->dev_addr, addr, 6);
12560                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12561                 return 0;
12562         }
12563         return -ENODEV;
12564 }
12565
12566 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12567 {
12568         struct net_device *dev = tp->dev;
12569
12570         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12571         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12572         return 0;
12573 }
12574 #endif
12575
/* Determine and validate the device MAC address.
 *
 * Lookup order:
 *   1. OpenFirmware property (SPARC only),
 *   2. the SRAM MAC-address mailbox left behind by the bootcode,
 *   3. NVRAM at @mac_offset,
 *   4. the MAC_ADDR_0_{HIGH,LOW} hardware registers,
 *   5. the IDPROM address (SPARC only).
 *
 * On success the address is mirrored into dev->perm_addr and 0 is
 * returned; -EINVAL if no valid ethernet address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts: the second function's address lives at
		 * NVRAM offset 0xcc instead of 0x7c.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset the
		 * NVRAM command engine; otherwise release the lock right
		 * away (we only needed to know NVRAM is usable).
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b = "HK" mailbox signature */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte ordering here differs
		 * from the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12650
12651 #define BOUNDARY_SINGLE_CACHELINE       1
12652 #define BOUNDARY_MULTI_CACHELINE        2
12653
/* Compute the DMA read/write boundary bits for this system and merge
 * them into @val (an image of the DMA_RWCTRL register), returning the
 * result.  The boundary is derived from the PCI cache line size and a
 * per-architecture burst policy (single vs. multiple cache lines).
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means the firmware
	 * did not program it, so assume the largest boundary (1024 B).
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture policy; goal == 0 keeps the chip defaults. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: uses its own boundary encodings. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express: only the write boundary is controllable
		 * (see comment above).
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: pick the boundary matching the cache
		 * line size when a single-cacheline burst is requested;
		 * otherwise fall through to the next larger boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12790
/* Run one raw DMA transfer of @size bytes between the host buffer at
 * @buf/@buf_dma and the NIC's internal mbuf memory, by driving the
 * chip's DMA descriptor FIFOs directly.  @to_device non-zero performs
 * a read-DMA (host -> NIC); zero performs a write-DMA (NIC -> host).
 * Returns 0 when the completion FIFO reports our descriptor, -ENODEV
 * after ~4ms (40 polls x 100us) without completion.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build one internal buffer descriptor pointing at the host
	 * buffer; 0x2100 is the NIC-side mbuf offset used by the test.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* NOTE(review): cqid_sqid values appear to select the
		 * completion/send queue pair for each direction — confirm
		 * against chip documentation.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12871
12872 #define TEST_BUFFER_SIZE        0x2000
12873
12874 static int __devinit tg3_test_dma(struct tg3 *tp)
12875 {
12876         dma_addr_t buf_dma;
12877         u32 *buf, saved_dma_rwctrl;
12878         int ret;
12879
12880         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12881         if (!buf) {
12882                 ret = -ENOMEM;
12883                 goto out_nofree;
12884         }
12885
12886         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12887                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12888
12889         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12890
12891         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12892                 /* DMA read watermark not used on PCIE */
12893                 tp->dma_rwctrl |= 0x00180000;
12894         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12895                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12896                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12897                         tp->dma_rwctrl |= 0x003f0000;
12898                 else
12899                         tp->dma_rwctrl |= 0x003f000f;
12900         } else {
12901                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12902                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12903                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12904                         u32 read_water = 0x7;
12905
12906                         /* If the 5704 is behind the EPB bridge, we can
12907                          * do the less restrictive ONE_DMA workaround for
12908                          * better performance.
12909                          */
12910                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12911                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12912                                 tp->dma_rwctrl |= 0x8000;
12913                         else if (ccval == 0x6 || ccval == 0x7)
12914                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12915
12916                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12917                                 read_water = 4;
12918                         /* Set bit 23 to enable PCIX hw bug fix */
12919                         tp->dma_rwctrl |=
12920                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12921                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12922                                 (1 << 23);
12923                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12924                         /* 5780 always in PCIX mode */
12925                         tp->dma_rwctrl |= 0x00144000;
12926                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12927                         /* 5714 always in PCIX mode */
12928                         tp->dma_rwctrl |= 0x00148000;
12929                 } else {
12930                         tp->dma_rwctrl |= 0x001b000f;
12931                 }
12932         }
12933
12934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12936                 tp->dma_rwctrl &= 0xfffffff0;
12937
12938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12940                 /* Remove this if it causes problems for some boards. */
12941                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12942
12943                 /* On 5700/5701 chips, we need to set this bit.
12944                  * Otherwise the chip will issue cacheline transactions
12945                  * to streamable DMA memory with not all the byte
12946                  * enables turned on.  This is an error on several
12947                  * RISC PCI controllers, in particular sparc64.
12948                  *
12949                  * On 5703/5704 chips, this bit has been reassigned
12950                  * a different meaning.  In particular, it is used
12951                  * on those chips to enable a PCI-X workaround.
12952                  */
12953                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12954         }
12955
12956         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12957
12958 #if 0
12959         /* Unneeded, already done by tg3_get_invariants.  */
12960         tg3_switch_clocks(tp);
12961 #endif
12962
12963         ret = 0;
12964         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12965             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12966                 goto out;
12967
12968         /* It is best to perform DMA test with maximum write burst size
12969          * to expose the 5700/5701 write DMA bug.
12970          */
12971         saved_dma_rwctrl = tp->dma_rwctrl;
12972         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12973         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12974
12975         while (1) {
12976                 u32 *p = buf, i;
12977
12978                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12979                         p[i] = i;
12980
12981                 /* Send the buffer to the chip. */
12982                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12983                 if (ret) {
12984                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12985                         break;
12986                 }
12987
12988 #if 0
12989                 /* validate data reached card RAM correctly. */
12990                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12991                         u32 val;
12992                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12993                         if (le32_to_cpu(val) != p[i]) {
12994                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12995                                 /* ret = -ENODEV here? */
12996                         }
12997                         p[i] = 0;
12998                 }
12999 #endif
13000                 /* Now read it back. */
13001                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13002                 if (ret) {
13003                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13004
13005                         break;
13006                 }
13007
13008                 /* Verify it. */
13009                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13010                         if (p[i] == i)
13011                                 continue;
13012
13013                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13014                             DMA_RWCTRL_WRITE_BNDRY_16) {
13015                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13016                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13017                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13018                                 break;
13019                         } else {
13020                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13021                                 ret = -ENODEV;
13022                                 goto out;
13023                         }
13024                 }
13025
13026                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13027                         /* Success. */
13028                         ret = 0;
13029                         break;
13030                 }
13031         }
13032         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13033             DMA_RWCTRL_WRITE_BNDRY_16) {
13034                 static struct pci_device_id dma_wait_state_chipsets[] = {
13035                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13036                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13037                         { },
13038                 };
13039
13040                 /* DMA test passed without adjusting DMA boundary,
13041                  * now look for chipsets that are known to expose the
13042                  * DMA bug without failing the test.
13043                  */
13044                 if (pci_dev_present(dma_wait_state_chipsets)) {
13045                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13046                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13047                 }
13048                 else
13049                         /* Safe to use the calculated DMA boundary. */
13050                         tp->dma_rwctrl = saved_dma_rwctrl;
13051
13052                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13053         }
13054
13055 out:
13056         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13057 out_nofree:
13058         return ret;
13059 }
13060
13061 static void __devinit tg3_init_link_config(struct tg3 *tp)
13062 {
13063         tp->link_config.advertising =
13064                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13065                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13066                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13067                  ADVERTISED_Autoneg | ADVERTISED_MII);
13068         tp->link_config.speed = SPEED_INVALID;
13069         tp->link_config.duplex = DUPLEX_INVALID;
13070         tp->link_config.autoneg = AUTONEG_ENABLE;
13071         tp->link_config.active_speed = SPEED_INVALID;
13072         tp->link_config.active_duplex = DUPLEX_INVALID;
13073         tp->link_config.phy_is_low_power = 0;
13074         tp->link_config.orig_speed = SPEED_INVALID;
13075         tp->link_config.orig_duplex = DUPLEX_INVALID;
13076         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13077 }
13078
13079 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13080 {
13081         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13082                 tp->bufmgr_config.mbuf_read_dma_low_water =
13083                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13084                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13085                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13086                 tp->bufmgr_config.mbuf_high_water =
13087                         DEFAULT_MB_HIGH_WATER_5705;
13088                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13089                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13090                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13091                         tp->bufmgr_config.mbuf_high_water =
13092                                 DEFAULT_MB_HIGH_WATER_5906;
13093                 }
13094
13095                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13096                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13097                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13098                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13099                 tp->bufmgr_config.mbuf_high_water_jumbo =
13100                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13101         } else {
13102                 tp->bufmgr_config.mbuf_read_dma_low_water =
13103                         DEFAULT_MB_RDMA_LOW_WATER;
13104                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13105                         DEFAULT_MB_MACRX_LOW_WATER;
13106                 tp->bufmgr_config.mbuf_high_water =
13107                         DEFAULT_MB_HIGH_WATER;
13108
13109                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13110                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13111                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13112                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13113                 tp->bufmgr_config.mbuf_high_water_jumbo =
13114                         DEFAULT_MB_HIGH_WATER_JUMBO;
13115         }
13116
13117         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13118         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13119 }
13120
13121 static char * __devinit tg3_phy_string(struct tg3 *tp)
13122 {
13123         switch (tp->phy_id & PHY_ID_MASK) {
13124         case PHY_ID_BCM5400:    return "5400";
13125         case PHY_ID_BCM5401:    return "5401";
13126         case PHY_ID_BCM5411:    return "5411";
13127         case PHY_ID_BCM5701:    return "5701";
13128         case PHY_ID_BCM5703:    return "5703";
13129         case PHY_ID_BCM5704:    return "5704";
13130         case PHY_ID_BCM5705:    return "5705";
13131         case PHY_ID_BCM5750:    return "5750";
13132         case PHY_ID_BCM5752:    return "5752";
13133         case PHY_ID_BCM5714:    return "5714";
13134         case PHY_ID_BCM5780:    return "5780";
13135         case PHY_ID_BCM5755:    return "5755";
13136         case PHY_ID_BCM5787:    return "5787";
13137         case PHY_ID_BCM5784:    return "5784";
13138         case PHY_ID_BCM5756:    return "5722/5756";
13139         case PHY_ID_BCM5906:    return "5906";
13140         case PHY_ID_BCM5761:    return "5761";
13141         case PHY_ID_BCM8002:    return "8002/serdes";
13142         case 0:                 return "serdes";
13143         default:                return "unknown";
13144         }
13145 }
13146
13147 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13148 {
13149         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13150                 strcpy(str, "PCI Express");
13151                 return str;
13152         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13153                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13154
13155                 strcpy(str, "PCIX:");
13156
13157                 if ((clock_ctrl == 7) ||
13158                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13159                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13160                         strcat(str, "133MHz");
13161                 else if (clock_ctrl == 0)
13162                         strcat(str, "33MHz");
13163                 else if (clock_ctrl == 2)
13164                         strcat(str, "50MHz");
13165                 else if (clock_ctrl == 4)
13166                         strcat(str, "66MHz");
13167                 else if (clock_ctrl == 6)
13168                         strcat(str, "100MHz");
13169         } else {
13170                 strcpy(str, "PCI:");
13171                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13172                         strcat(str, "66MHz");
13173                 else
13174                         strcat(str, "33MHz");
13175         }
13176         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13177                 strcat(str, ":32-bit");
13178         else
13179                 strcat(str, ":64-bit");
13180         return str;
13181 }
13182
/* Locate the sibling PCI function of a dual-port device (e.g. the other
 * port of a 5704) by scanning all functions in the same slot.  Returns
 * the peer's pci_dev, or tp->pdev itself when no distinct peer exists.
 * The returned pointer carries no extra reference (see below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr = slot base: devfn with the 3 function bits cleared. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		/* Stop on the first function that exists and is not us. */
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference pci_get_slot() took (no-op on NULL). */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 * NOTE(review): after a full scan, peer holds the (already put)
	 * result of the last pci_get_slot() call — presumably NULL when
	 * function 7 is absent; verify there is no configuration where a
	 * stale non-NULL pointer reaches the pci_dev_put() below.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13210
13211 static void __devinit tg3_init_coal(struct tg3 *tp)
13212 {
13213         struct ethtool_coalesce *ec = &tp->coal;
13214
13215         memset(ec, 0, sizeof(*ec));
13216         ec->cmd = ETHTOOL_GCOALESCE;
13217         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13218         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13219         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13220         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13221         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13222         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13223         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13224         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13225         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13226
13227         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13228                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13229                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13230                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13231                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13232                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13233         }
13234
13235         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13236                 ec->rx_coalesce_usecs_irq = 0;
13237                 ec->tx_coalesce_usecs_irq = 0;
13238                 ec->stats_block_coalesce_usecs = 0;
13239         }
13240 }
13241
/* PCI probe entry point: enable and map the device, discover chip
 * features (DMA width, TSO, checksum offload, APE), verify the DMA
 * engine, and register the net_device.  Returns 0 on success or a
 * negative errno, unwinding all acquired resources on failure.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	/* Print the version banner only for the first device probed. */
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	/* Allocate the net_device with struct tg3 as private data. */
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Module parameter tg3_debug overrides the default log mask. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device entry points. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip id/revision and derive the tg3_flags feature bits. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wider mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Classify TSO support: hardware TSO, no TSO (old chips, 5906,
	 * or ASF enabled), or firmware TSO with the known TSO bug.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 without TSO on a slow bus: cap the RX ring at 64. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE (management processor) registers from BAR 2. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Announce the device and its detected capabilities. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwinding: each label releases everything acquired
	 * before the point that jumped to it, in reverse order.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13587
13588 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13589 {
13590         struct net_device *dev = pci_get_drvdata(pdev);
13591
13592         if (dev) {
13593                 struct tg3 *tp = netdev_priv(dev);
13594
13595                 flush_scheduled_work();
13596
13597                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13598                         tg3_phy_fini(tp);
13599                         tg3_mdio_fini(tp);
13600                 }
13601
13602                 unregister_netdev(dev);
13603                 if (tp->aperegs) {
13604                         iounmap(tp->aperegs);
13605                         tp->aperegs = NULL;
13606                 }
13607                 if (tp->regs) {
13608                         iounmap(tp->regs);
13609                         tp->regs = NULL;
13610                 }
13611                 free_netdev(dev);
13612                 pci_release_regions(pdev);
13613                 pci_disable_device(pdev);
13614                 pci_set_drvdata(pdev, NULL);
13615         }
13616 }
13617
/* PCI suspend hook: quiesce the interface, halt the chip, and move it
 * to the target low-power state.  If the power transition fails, the
 * device is restarted so it stays usable, and the error is returned.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Quiesce: drain deferred work, stop the PHY and the data path. */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark the hardware as uninitialized. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	/* Without a PM capability we can only go straight to D3hot. */
	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed: bring the device back up so it
		 * remains usable, but still report the original error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY only if the hardware restart worked. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13679
/* PCI resume hook: restore config space, return the chip to D0, and
 * restart the hardware, timer, and data path stopped by tg3_suspend().
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Undo the pci_save_state() done in tg3_suspend(). */
	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the hardware came up cleanly. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13717
/* PCI driver descriptor: ties the device id table to the probe/remove
 * and suspend/resume entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13726
13727 static int __init tg3_init(void)
13728 {
13729         return pci_register_driver(&tg3_driver);
13730 }
13731
13732 static void __exit tg3_cleanup(void)
13733 {
13734         pci_unregister_driver(&tg3_driver);
13735 }
13736
/* Register the driver at module load; unregister it at unload. */
module_init(tg3_init);
module_exit(tg3_cleanup);