tg3: Use SKB DMA helper functions for TX.
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
59 #else
60 #define TG3_VLAN_TAG_USED 0
61 #endif
62
63 #define TG3_TSO_SUPPORT 1
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.94"
70 #define DRV_MODULE_RELDATE      "August 14, 2008"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
125
126 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
128
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
131
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134
135 #define TG3_NUM_TEST            6
136
137 static char version[] __devinitdata =
138         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
141 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
142 MODULE_LICENSE("GPL");
143 MODULE_VERSION(DRV_MODULE_VERSION);
144
145 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 module_param(tg3_debug, int, 0);
147 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
149 static struct pci_device_id tg3_pci_tbl[] = {
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
209         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
210         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
212         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
213         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
214         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
215         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
216         {}
217 };
218
219 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
220
221 static const struct {
222         const char string[ETH_GSTRING_LEN];
223 } ethtool_stats_keys[TG3_NUM_STATS] = {
224         { "rx_octets" },
225         { "rx_fragments" },
226         { "rx_ucast_packets" },
227         { "rx_mcast_packets" },
228         { "rx_bcast_packets" },
229         { "rx_fcs_errors" },
230         { "rx_align_errors" },
231         { "rx_xon_pause_rcvd" },
232         { "rx_xoff_pause_rcvd" },
233         { "rx_mac_ctrl_rcvd" },
234         { "rx_xoff_entered" },
235         { "rx_frame_too_long_errors" },
236         { "rx_jabbers" },
237         { "rx_undersize_packets" },
238         { "rx_in_length_errors" },
239         { "rx_out_length_errors" },
240         { "rx_64_or_less_octet_packets" },
241         { "rx_65_to_127_octet_packets" },
242         { "rx_128_to_255_octet_packets" },
243         { "rx_256_to_511_octet_packets" },
244         { "rx_512_to_1023_octet_packets" },
245         { "rx_1024_to_1522_octet_packets" },
246         { "rx_1523_to_2047_octet_packets" },
247         { "rx_2048_to_4095_octet_packets" },
248         { "rx_4096_to_8191_octet_packets" },
249         { "rx_8192_to_9022_octet_packets" },
250
251         { "tx_octets" },
252         { "tx_collisions" },
253
254         { "tx_xon_sent" },
255         { "tx_xoff_sent" },
256         { "tx_flow_control" },
257         { "tx_mac_errors" },
258         { "tx_single_collisions" },
259         { "tx_mult_collisions" },
260         { "tx_deferred" },
261         { "tx_excessive_collisions" },
262         { "tx_late_collisions" },
263         { "tx_collide_2times" },
264         { "tx_collide_3times" },
265         { "tx_collide_4times" },
266         { "tx_collide_5times" },
267         { "tx_collide_6times" },
268         { "tx_collide_7times" },
269         { "tx_collide_8times" },
270         { "tx_collide_9times" },
271         { "tx_collide_10times" },
272         { "tx_collide_11times" },
273         { "tx_collide_12times" },
274         { "tx_collide_13times" },
275         { "tx_collide_14times" },
276         { "tx_collide_15times" },
277         { "tx_ucast_packets" },
278         { "tx_mcast_packets" },
279         { "tx_bcast_packets" },
280         { "tx_carrier_sense_errors" },
281         { "tx_discards" },
282         { "tx_errors" },
283
284         { "dma_writeq_full" },
285         { "dma_write_prioq_full" },
286         { "rxbds_empty" },
287         { "rx_discards" },
288         { "rx_errors" },
289         { "rx_threshold_hit" },
290
291         { "dma_readq_full" },
292         { "dma_read_prioq_full" },
293         { "tx_comp_queue_full" },
294
295         { "ring_set_send_prod_index" },
296         { "ring_status_update" },
297         { "nic_irqs" },
298         { "nic_avoided_irqs" },
299         { "nic_tx_threshold_hit" }
300 };
301
302 static const struct {
303         const char string[ETH_GSTRING_LEN];
304 } ethtool_test_keys[TG3_NUM_TEST] = {
305         { "nvram test     (online) " },
306         { "link test      (online) " },
307         { "register test  (offline)" },
308         { "memory test    (offline)" },
309         { "loopback test  (offline)" },
310         { "interrupt test (offline)" },
311 };
312
/* Post a 32-bit write to a chip register via the memory-mapped BAR. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
317
318 static u32 tg3_read32(struct tg3 *tp, u32 off)
319 {
320         return (readl(tp->regs + off));
321 }
322
/* Post a 32-bit write to an APE (management processor) register. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
327
328 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
329 {
330         return (readl(tp->aperegs + off));
331 }
332
/* Write a chip register indirectly through PCI config space: aim the
 * shared REG_BASE_ADDR window at @off, then push @val through REG_DATA.
 * indirect_lock serializes all users of the shared window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
342
/* Write a chip register and immediately read it back, flushing the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
348
/* Read a chip register indirectly through PCI config space, using the
 * same REG_BASE_ADDR/REG_DATA window as tg3_write_indirect_reg32()
 * (serialized by indirect_lock).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
360
/* Write a mailbox register indirectly through PCI config space.
 *
 * Two mailboxes (receive return consumer index, standard ring producer
 * index) have dedicated config-space aliases and bypass the shared
 * window entirely; all others go through REG_BASE_ADDR/REG_DATA, with
 * the mailbox region located at register offset +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
390
/* Read a mailbox register indirectly through PCI config space; the
 * mailbox region sits at register offset +0x5600 behind the shared
 * REG_BASE_ADDR/REG_DATA window (serialized by indirect_lock).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
402
403 /* usec_wait specifies the wait time in usec when writing to certain registers
404  * where it is unsafe to read back the register without some delay.
405  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
406  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
407  */
/* Flushing register write; see the usec_wait comment above for why the
 * delay must be honored both before and after the read-back.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
427
/* Mailbox write with read-back flush, skipped on chips where reading
 * the mailbox back is unsafe (write-reorder or ICH workaround bugs).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
435
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips with write-reorder issues need a read
 * back to force the write out in order.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
445
446 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
447 {
448         return (readl(tp->regs + off + GRCMBOX_BASE));
449 }
450
/* 5906 mailbox write: mailboxes are reached through the GRC mailbox
 * region on this chip.
 */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
455
456 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
457 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
458 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
459 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
460 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
461
462 #define tw32(reg,val)           tp->write32(tp, reg, val)
463 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
464 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
465 #define tr32(reg)               tp->read32(tp, reg)
466
/* Write a word into NIC on-board SRAM through the memory window,
 * either via PCI config space (SRAM_USE_CONFIG) or via direct flushed
 * register writes.  Writes to the 5906 statistics block region are
 * silently dropped.  The window base is always restored to zero.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
491
/* Read a word from NIC on-board SRAM through the memory window,
 * mirroring tg3_write_mem().  Reads from the 5906 statistics block
 * region return zero instead of touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
518
519 static void tg3_ape_lock_init(struct tg3 *tp)
520 {
521         int i;
522
523         /* Make sure the driver hasn't any stale locks. */
524         for (i = 0; i < 8; i++)
525                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
526                                 APE_LOCK_GRANT_DRIVER);
527 }
528
529 static int tg3_ape_lock(struct tg3 *tp, int locknum)
530 {
531         int i, off;
532         int ret = 0;
533         u32 status;
534
535         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536                 return 0;
537
538         switch (locknum) {
539                 case TG3_APE_LOCK_GRC:
540                 case TG3_APE_LOCK_MEM:
541                         break;
542                 default:
543                         return -EINVAL;
544         }
545
546         off = 4 * locknum;
547
548         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
549
550         /* Wait for up to 1 millisecond to acquire lock. */
551         for (i = 0; i < 100; i++) {
552                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
553                 if (status == APE_LOCK_GRANT_DRIVER)
554                         break;
555                 udelay(10);
556         }
557
558         if (status != APE_LOCK_GRANT_DRIVER) {
559                 /* Revoke the lock request. */
560                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
561                                 APE_LOCK_GRANT_DRIVER);
562
563                 ret = -EBUSY;
564         }
565
566         return ret;
567 }
568
569 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
570 {
571         int off;
572
573         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
574                 return;
575
576         switch (locknum) {
577                 case TG3_APE_LOCK_GRC:
578                 case TG3_APE_LOCK_MEM:
579                         break;
580                 default:
581                         return;
582         }
583
584         off = 4 * locknum;
585         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
586 }
587
/* Stop the chip generating interrupts: mask the PCI interrupt line in
 * MISC_HOST_CTRL and write 1 to the interrupt mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
594
/* Kick the chip to raise an interrupt if one may be needed: via the
 * GRC SETINT bit when an unprocessed status update is pending (and
 * tagged status is not in use), otherwise by forcing a host
 * coalescing cycle now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
604
/* Re-enable chip interrupts: clear irq_sync (ordered before the
 * hardware writes by wmb), unmask the PCI interrupt line, and ack up
 * to last_tag via the interrupt mailbox.  One-shot MSI chips need the
 * mailbox written a second time.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
619
620 static inline unsigned int tg3_has_work(struct tg3 *tp)
621 {
622         struct tg3_hw_status *sblk = tp->hw_status;
623         unsigned int work_exists = 0;
624
625         /* check for phy events */
626         if (!(tp->tg3_flags &
627               (TG3_FLAG_USE_LINKCHG_REG |
628                TG3_FLAG_POLL_SERDES))) {
629                 if (sblk->status & SD_STATUS_LINK_CHG)
630                         work_exists = 1;
631         }
632         /* check for RX/TX work to do */
633         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
634             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
635                 work_exists = 1;
636
637         return work_exists;
638 }
639
640 /* tg3_restart_ints
641  *  similar to tg3_enable_ints, but it accurately determines whether there
642  *  is new work pending and can return without flushing the PIO write
643  *  which reenables interrupts
644  */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Ack work up to last_tag; mmiowb orders the mailbox write. */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
660
/* Quiesce the interface: refresh trans_start so the TX watchdog does
 * not fire while we are stopped, then disable NAPI and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
667
/* Undo tg3_netif_stop(): wake the TX queue, re-enable NAPI, and force
 * a status update + interrupt so any work that arrived while stopped
 * is picked up.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
679
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low clock divider field.  Skipped entirely on CPMU-equipped and
 * 5780-class chips.  Each step uses a 40us timed flush because the
 * clock frequency is changing (see the _tw32_flush comment).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down through ALTCLK in two writes before settling
		 * on the final value below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
711
712 #define PHY_BUSY_LOOPS  5000
713
/* Read a PHY register over the MI (MDIO) interface.
 *
 * MI autopolling is temporarily disabled around the access and
 * restored afterwards.  On success stores the 16-bit register value
 * in *val and returns 0; returns -EBUSY if the MI engine stays busy
 * for all PHY_BUSY_LOOPS polls (*val is left at 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI_COM frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI engine finishes the transaction. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
762
/* Write a PHY register over the MI (MDIO) interface.
 *
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently ignored (returns 0).  MI autopolling is temporarily
 * disabled around the access.  Returns 0 on success, -EBUSY if the
 * MI engine stays busy for all PHY_BUSY_LOOPS polls.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI_COM frame: PHY address, register, data, write cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI engine finishes the transaction. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
811
812 static int tg3_bmcr_reset(struct tg3 *tp)
813 {
814         u32 phy_control;
815         int limit, err;
816
817         /* OK, reset it, and poll the BMCR_RESET bit until it
818          * clears or we time out.
819          */
820         phy_control = BMCR_RESET;
821         err = tg3_writephy(tp, MII_BMCR, phy_control);
822         if (err != 0)
823                 return -EBUSY;
824
825         limit = 5000;
826         while (limit--) {
827                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
828                 if (err != 0)
829                         return -EBUSY;
830
831                 if ((phy_control & BMCR_RESET) == 0) {
832                         udelay(40);
833                         break;
834                 }
835                 udelay(10);
836         }
837         if (limit <= 0)
838                 return -EBUSY;
839
840         return 0;
841 }
842
843 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
844 {
845         struct tg3 *tp = (struct tg3 *)bp->priv;
846         u32 val;
847
848         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
849                 return -EAGAIN;
850
851         if (tg3_readphy(tp, reg, &val))
852                 return -EIO;
853
854         return val;
855 }
856
857 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
858 {
859         struct tg3 *tp = (struct tg3 *)bp->priv;
860
861         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
862                 return -EAGAIN;
863
864         if (tg3_writephy(tp, reg, val))
865                 return -EIO;
866
867         return 0;
868 }
869
/* phylib reset callback.  The PHY is reset elsewhere in this driver
 * (see tg3_bmcr_reset), so this is a deliberate no-op that always
 * reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
874
/* Program the MAC-side RGMII registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) according to the in-band/out-of-band status
 * flags in tg3_flags3.  No-op unless the attached PHY is in RGMII
 * mode.
 */
static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	/* When standard in-band signalling is disabled, optionally
	 * enable the external RX decode / TX send-status paths.
	 */
	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	/* In-band status is enabled only in the standard mode. */
	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	/* Clear every RGMII out-of-band mode bit, then set back only
	 * those requested by the EXT_IBND RX/TX flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
919
/* Resume MDIO bus accesses after tg3_mdio_stop(), disable hardware
 * auto-polling of the PHY, and re-apply the RGMII MAC configuration
 * if the mdio bus has been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	/* Clear the pause flag under the mdio lock so in-flight
	 * tg3_mdio_read/write callers see a consistent state.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus.mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus.mdio_lock);
	}

	/* Software drives MI transactions; hardware auto-poll off. */
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}
935
936 static void tg3_mdio_stop(struct tg3 *tp)
937 {
938         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
939                 mutex_lock(&tp->mdio_bus.mdio_lock);
940                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
941                 mutex_unlock(&tp->mdio_bus.mdio_lock);
942         }
943 }
944
/* Register the tg3 device's MDIO bus with phylib and apply per-PHY
 * quirks.  Only runs once, and only when TG3_FLG3_USE_PHYLIB is set;
 * otherwise it just (re)starts software MI access.
 *
 * Returns 0 on success or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;
	struct mii_bus *mdio_bus = &tp->mdio_bus;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	memset(mdio_bus, 0, sizeof(*mdio_bus));

	/* Bus id is derived from the PCI bus/devfn so it is unique
	 * per device.
	 */
	mdio_bus->name     = "tg3 mdio bus";
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	mdio_bus->priv     = tp;
	mdio_bus->dev      = &tp->pdev->dev;
	mdio_bus->read     = &tg3_mdio_read;
	mdio_bus->write    = &tg3_mdio_write;
	mdio_bus->reset    = &tg3_mdio_reset;
	/* Only the single PHY at PHY_ADDR is probed. */
	mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	/* Per-PHY interface mode and Broadcom-specific dev_flags. */
	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}
1012
1013 static void tg3_mdio_fini(struct tg3 *tp)
1014 {
1015         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1016                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1017                 mdiobus_unregister(&tp->mdio_bus);
1018                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1019         }
1020 }
1021
/* Signal the on-chip firmware that the driver has posted an event
 * (set GRC_RX_CPU_DRIVER_EVENT) and record the time so that
 * tg3_wait_for_event_ack() can bound its wait.
 *
 * tp->lock is held.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1033
1034 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1035
/* Wait for the firmware to acknowledge (clear) the previous driver
 * event, up to TG3_FW_EVENT_TIMEOUT_USEC measured from the moment the
 * event was generated — so if enough time has already elapsed, no
 * wait happens at all.
 *
 * tp->lock is held.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps; +1 guarantees at least one check. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1062
/* Report the current link/PHY state to the management firmware (ASF on
 * 5780-class chips) through the NIC_SRAM firmware-command mailbox:
 * four 32-bit words of packed MII register contents, followed by a
 * driver-event doorbell.
 *
 * tp->lock is held.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure the previous event has been consumed first. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Payload length in bytes (see the data words below). */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement / link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1110
/* Log the current link state (speed, duplex and flow-control settings)
 * if link messages are enabled, and forward the state to management
 * firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		/* Firmware is told about link-down regardless of the
		 * message-level setting.
		 */
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
1138
1139 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1140 {
1141         u16 miireg;
1142
1143         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1144                 miireg = ADVERTISE_PAUSE_CAP;
1145         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1146                 miireg = ADVERTISE_PAUSE_ASYM;
1147         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1148                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1149         else
1150                 miireg = 0;
1151
1152         return miireg;
1153 }
1154
1155 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1156 {
1157         u16 miireg;
1158
1159         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1160                 miireg = ADVERTISE_1000XPAUSE;
1161         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1162                 miireg = ADVERTISE_1000XPSE_ASYM;
1163         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1164                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1165         else
1166                 miireg = 0;
1167
1168         return miireg;
1169 }
1170
1171 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1172 {
1173         u8 cap = 0;
1174
1175         if (lcladv & ADVERTISE_PAUSE_CAP) {
1176                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1177                         if (rmtadv & LPA_PAUSE_CAP)
1178                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1179                         else if (rmtadv & LPA_PAUSE_ASYM)
1180                                 cap = TG3_FLOW_CTRL_RX;
1181                 } else {
1182                         if (rmtadv & LPA_PAUSE_CAP)
1183                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1184                 }
1185         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1186                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1187                         cap = TG3_FLOW_CTRL_TX;
1188         }
1189
1190         return cap;
1191 }
1192
1193 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1194 {
1195         u8 cap = 0;
1196
1197         if (lcladv & ADVERTISE_1000XPAUSE) {
1198                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1199                         if (rmtadv & LPA_1000XPAUSE)
1200                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1201                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1202                                 cap = TG3_FLOW_CTRL_RX;
1203                 } else {
1204                         if (rmtadv & LPA_1000XPAUSE)
1205                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1206                 }
1207         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1208                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1209                         cap = TG3_FLOW_CTRL_TX;
1210         }
1211
1212         return cap;
1213 }
1214
/* Decide the active flow-control configuration — negotiated from
 * @lcladv/@rmtadv when pause autonegotiation is enabled, otherwise the
 * forced setting in link_config — and push it into the MAC's RX/TX
 * mode registers, writing each register only when it changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg state lives in the phy_device. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		/* Serdes links use the 1000BASE-X pause bits. */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1254
/* phylib link-change callback: mirror the PHY's negotiated state
 * (port mode, duplex, flow control, TX slot time) into the MAC
 * registers under tp->lock, then log a link message outside the lock
 * if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our config and
			 * the partner's pause/asym_pause bits.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* 1000/half needs an extended slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Emit a link message only on a real state transition. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* tg3_link_report() may sleep/log; call it unlocked. */
	if (linkmesg)
		tg3_link_report(tp);
}
1326
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link() as the link-change handler, and
 * restrict the advertised features to what the MAC supports.
 *
 * Returns 0 on success (or if already connected), or the phy_connect()
 * error.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}
1362
/* (Re)start the phylib state machine for the attached PHY.  If the
 * PHY was put into low-power mode, first restore the speed/duplex/
 * autoneg settings saved before suspend.  No-op when no PHY is
 * connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	/* Kick off (re)negotiation with the restored settings. */
	phy_start_aneg(phydev);
}
1384
1385 static void tg3_phy_stop(struct tg3 *tp)
1386 {
1387         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1388                 return;
1389
1390         phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1391 }
1392
1393 static void tg3_phy_fini(struct tg3 *tp)
1394 {
1395         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1396                 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1397                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1398         }
1399 }
1400
/* Write @val to DSP register @reg.  The TG3 PHY exposes its DSP via an
 * address/data register pair: MII_TG3_DSP_ADDRESS selects the target,
 * MII_TG3_DSP_RW_PORT carries the data.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1406
/* Enable or disable automatic MDI/MDI-X crossover on the PHY.  The
 * 5906's internal ephy uses a shadowed test register; other 5705+
 * copper PHYs use the AUX_CTRL misc shadow.  No-op on pre-5705 chips
 * and serdes links.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Open the ephy shadow window, flip the MDIX bit in the
		 * misc-control shadow, then restore the test register.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the AUX_CTRL misc shadow, read-modify-write the
		 * force-auto-MDIX bit, and set WREN so the write sticks.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1444
/* Enable the PHY's "ethernet wirespeed" feature (auto speed downshift
 * on marginal cabling) unless the chip flags forbid it.
 *
 * NOTE(review): 0x7007 selects an AUX_CTRL shadow register and bits
 * 15/4 appear to be the wirespeed enables — undocumented magic values
 * carried from the vendor driver; confirm against Broadcom docs.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1457
/* Transfer factory calibration values from the chip's one-time-
 * programmable (OTP) word into the PHY DSP registers.  Each field is
 * extracted from tp->phy_otp with its mask/shift and written via
 * tg3_phydsp_write().  No-op when no OTP data was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target, combined with its default bits. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter tuning. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1500
1501 static int tg3_wait_macro_done(struct tg3 *tp)
1502 {
1503         int limit = 100;
1504
1505         while (limit--) {
1506                 u32 tmp32;
1507
1508                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1509                         if ((tmp32 & 0x1000) == 0)
1510                                 break;
1511                 }
1512         }
1513         if (limit <= 0)
1514                 return -EBUSY;
1515
1516         return 0;
1517 }
1518
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is stable.  Sets *@resetp when
 * the caller should reset the PHY and retry.
 *
 * Returns 0 when all channels verify, -EBUSY on any macro timeout or
 * pattern mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only these bits of each pair are significant. */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: leave the DSP in a safe state
				 * before reporting failure (no PHY reset
				 * requested for this case).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1584
/* Clear the test pattern from all four PHY DSP channels by writing
 * zeroes through the write macro used in
 * tg3_phy_write_and_check_testpat().
 *
 * Returns 0 on success, -EBUSY if a macro operation times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		/* Zero out all six pattern words of this channel. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1604
/* Work around DSP instability on 5703/5704/5705 PHYs: reset the PHY,
 * force 1000Mb full-duplex master mode, write a test pattern into the
 * DSP TAP channels and verify it reads back correctly
 * (tg3_phy_write_and_check_testpat), retrying with a fresh BMCR reset
 * up to 10 times.  Afterwards the DSP channels are restored
 * (tg3_phy_reset_chanpat) and the saved MII_TG3_CTRL / MII_TG3_EXT_CTRL
 * settings are put back.
 *
 * Returns 0 on success or a negative error code from the PHY accesses.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;	/* read failed: burn a retry without another reset */

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  phy9_orig saves the current
		 * 1000BASE-T control register for restoration below.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* On pattern-verification failure the helper may request
		 * another BMCR reset via do_phy_reset.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if every iteration above bailed out through
	 * "continue", phy9_orig is used uninitialized further down --
	 * pre-existing behavior, deliberately left unchanged here.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Restore the DSP TAP channel and channel-0x8200 state. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the 1000BASE-T control register saved in the loop. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits
	 * set in the loop).
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1680
/* Reset the tigon3 PHY and re-apply all chip-specific workarounds
 * (DSP fixups for ADC/BER/jitter errata, jumbo-frame bits, output
 * voltage, auto-MDIX and wirespeed).  If the carrier was up, link
 * loss is reported to the network stack first.
 *
 * Returns 0 on success or a negative error code.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 internal EPHY out of IDDQ power-down. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR link status is latched-low; read it twice so the second
	 * read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavier DSP test-pattern reset path. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX) temporarily clear GPHY_10MB_RXONLY in CPMU
	 * control around the BMCR reset; restored below.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Undo the 12.5MHz 1000Mb MAC clock selection that
		 * tg3_power_down_phy() may have applied.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* PHY errata fixups.  The DSP address/value pairs below are
	 * vendor-provided magic and are not otherwise documented.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice on purpose (pre-existing sequence). */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1826
/* Drive the GPIO pins in GRC_LOCAL_CTRL that select between main and
 * auxiliary (Vaux) power.  On dual-port 5704/5714 boards the GPIOs are
 * shared with the peer device, so the peer's WOL/ASF requirements are
 * honored too.  The exact GPIO sequences are board/chip specific --
 * see the per-chip branches below.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only boards flagged as NICs have the power-switching GPIOs. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* WOL or ASF is active on this port or its peer, so the
		 * GPIOs are sequenced to keep auxiliary power available.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port already finished init, it owns
			 * the shared GPIOs; leave them alone.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs WOL/ASF power. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1937
1938 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1939 {
1940         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1941                 return 1;
1942         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1943                 if (speed != SPEED_10)
1944                         return 1;
1945         } else if (speed == SPEED_10)
1946                 return 1;
1947
1948         return 0;
1949 }
1950
1951 static int tg3_setup_phy(struct tg3 *, int);
1952
1953 #define RESET_KIND_SHUTDOWN     0
1954 #define RESET_KIND_INIT         1
1955 #define RESET_KIND_SUSPEND      2
1956
1957 static void tg3_write_sig_post_reset(struct tg3 *, int);
1958 static int tg3_halt_cpu(struct tg3 *, u32);
1959 static int tg3_nvram_lock(struct tg3 *);
1960 static void tg3_nvram_unlock(struct tg3 *);
1961
/* Put the PHY into its lowest-power state the chip safely allows.
 * SERDES, the 5906 internal EPHY and copper PHYs each take a different
 * path, and a few chips must not have their PHY powered down at all
 * because of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SG-DIG block in soft reset. */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the EPHY and park it in IDDQ rather than
		 * using BMCR_PDOWN.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		/* Force the LEDs off while powered down. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Drop the 1000Mb MAC clock to 12.5MHz before power-down;
		 * tg3_phy_reset() undoes this on the way back up.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2009
/* Move the chip into PCI power state @state.  D0 restores full power
 * and returns immediately; D1/D2/D3hot reconfigure the MAC, PHY and
 * clocks for low power -- arming Wake-on-LAN if enabled -- before
 * finally setting the PCI power state.
 *
 * Returns 0 on success or -EINVAL for an unsupported state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}
	/* Mask PCI interrupts while the chip is in low power. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib-managed PHY: save the current link settings and
		 * restrict advertisement to the speeds needed while
		 * suspended, then restart autonegotiation.
		 */
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 advertising;

			phydev = tp->mdio_bus.phy_map[PHY_ADDR];

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);
		}
	} else {
		/* Legacy path: save the link config, then force a slow
		 * link (10/half autoneg) on copper via tg3_setup_phy().
		 */
		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll (up to ~200ms) for the firmware status mailbox
		 * magic before proceeding with shutdown.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep the MAC alive in a mode able to receive the
		 * wake-up (magic) packet.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (pci_pme_capable(tp->pdev, state) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			/* Preserve the APE's TX/RX enables across suspend. */
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks where the chip generation allows it. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Clock bits are applied in two steps, 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when nothing (WOL, ASF, APE)
	 * still needs it.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
2259
2260 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2261 {
2262         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2263         case MII_TG3_AUX_STAT_10HALF:
2264                 *speed = SPEED_10;
2265                 *duplex = DUPLEX_HALF;
2266                 break;
2267
2268         case MII_TG3_AUX_STAT_10FULL:
2269                 *speed = SPEED_10;
2270                 *duplex = DUPLEX_FULL;
2271                 break;
2272
2273         case MII_TG3_AUX_STAT_100HALF:
2274                 *speed = SPEED_100;
2275                 *duplex = DUPLEX_HALF;
2276                 break;
2277
2278         case MII_TG3_AUX_STAT_100FULL:
2279                 *speed = SPEED_100;
2280                 *duplex = DUPLEX_FULL;
2281                 break;
2282
2283         case MII_TG3_AUX_STAT_1000HALF:
2284                 *speed = SPEED_1000;
2285                 *duplex = DUPLEX_HALF;
2286                 break;
2287
2288         case MII_TG3_AUX_STAT_1000FULL:
2289                 *speed = SPEED_1000;
2290                 *duplex = DUPLEX_FULL;
2291                 break;
2292
2293         default:
2294                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2295                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2296                                  SPEED_10;
2297                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2298                                   DUPLEX_HALF;
2299                         break;
2300                 }
2301                 *speed = SPEED_INVALID;
2302                 *duplex = DUPLEX_INVALID;
2303                 break;
2304         }
2305 }
2306
2307 static void tg3_phy_copper_begin(struct tg3 *tp)
2308 {
2309         u32 new_adv;
2310         int i;
2311
2312         if (tp->link_config.phy_is_low_power) {
2313                 /* Entering low power mode.  Disable gigabit and
2314                  * 100baseT advertisements.
2315                  */
2316                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2317
2318                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2319                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2320                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2321                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2322
2323                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2324         } else if (tp->link_config.speed == SPEED_INVALID) {
2325                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2326                         tp->link_config.advertising &=
2327                                 ~(ADVERTISED_1000baseT_Half |
2328                                   ADVERTISED_1000baseT_Full);
2329
2330                 new_adv = ADVERTISE_CSMA;
2331                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2332                         new_adv |= ADVERTISE_10HALF;
2333                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2334                         new_adv |= ADVERTISE_10FULL;
2335                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2336                         new_adv |= ADVERTISE_100HALF;
2337                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2338                         new_adv |= ADVERTISE_100FULL;
2339
2340                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2341
2342                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2343
2344                 if (tp->link_config.advertising &
2345                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2346                         new_adv = 0;
2347                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2348                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2350                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2351                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2352                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2353                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2354                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2355                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2356                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2357                 } else {
2358                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2359                 }
2360         } else {
2361                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2362                 new_adv |= ADVERTISE_CSMA;
2363
2364                 /* Asking for a specific link mode. */
2365                 if (tp->link_config.speed == SPEED_1000) {
2366                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2367
2368                         if (tp->link_config.duplex == DUPLEX_FULL)
2369                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2370                         else
2371                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2372                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2373                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2374                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2375                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2376                 } else {
2377                         if (tp->link_config.speed == SPEED_100) {
2378                                 if (tp->link_config.duplex == DUPLEX_FULL)
2379                                         new_adv |= ADVERTISE_100FULL;
2380                                 else
2381                                         new_adv |= ADVERTISE_100HALF;
2382                         } else {
2383                                 if (tp->link_config.duplex == DUPLEX_FULL)
2384                                         new_adv |= ADVERTISE_10FULL;
2385                                 else
2386                                         new_adv |= ADVERTISE_10HALF;
2387                         }
2388                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2389
2390                         new_adv = 0;
2391                 }
2392
2393                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2394         }
2395
2396         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2397             tp->link_config.speed != SPEED_INVALID) {
2398                 u32 bmcr, orig_bmcr;
2399
2400                 tp->link_config.active_speed = tp->link_config.speed;
2401                 tp->link_config.active_duplex = tp->link_config.duplex;
2402
2403                 bmcr = 0;
2404                 switch (tp->link_config.speed) {
2405                 default:
2406                 case SPEED_10:
2407                         break;
2408
2409                 case SPEED_100:
2410                         bmcr |= BMCR_SPEED100;
2411                         break;
2412
2413                 case SPEED_1000:
2414                         bmcr |= TG3_BMCR_SPEED1000;
2415                         break;
2416                 }
2417
2418                 if (tp->link_config.duplex == DUPLEX_FULL)
2419                         bmcr |= BMCR_FULLDPLX;
2420
2421                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2422                     (bmcr != orig_bmcr)) {
2423                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2424                         for (i = 0; i < 1500; i++) {
2425                                 u32 tmp;
2426
2427                                 udelay(10);
2428                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2429                                     tg3_readphy(tp, MII_BMSR, &tmp))
2430                                         continue;
2431                                 if (!(tmp & BMSR_LSTATUS)) {
2432                                         udelay(40);
2433                                         break;
2434                                 }
2435                         }
2436                         tg3_writephy(tp, MII_BMCR, bmcr);
2437                         udelay(40);
2438                 }
2439         } else {
2440                 tg3_writephy(tp, MII_BMCR,
2441                              BMCR_ANENABLE | BMCR_ANRESTART);
2442         }
2443 }
2444
2445 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2446 {
2447         int err;
2448
2449         /* Turn off tap power management. */
2450         /* Set Extended packet length bit */
2451         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2452
2453         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2454         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2455
2456         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2457         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2458
2459         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2460         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2461
2462         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2463         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2464
2465         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2466         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2467
2468         udelay(40);
2469
2470         return err;
2471 }
2472
2473 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2474 {
2475         u32 adv_reg, all_mask = 0;
2476
2477         if (mask & ADVERTISED_10baseT_Half)
2478                 all_mask |= ADVERTISE_10HALF;
2479         if (mask & ADVERTISED_10baseT_Full)
2480                 all_mask |= ADVERTISE_10FULL;
2481         if (mask & ADVERTISED_100baseT_Half)
2482                 all_mask |= ADVERTISE_100HALF;
2483         if (mask & ADVERTISED_100baseT_Full)
2484                 all_mask |= ADVERTISE_100FULL;
2485
2486         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2487                 return 0;
2488
2489         if ((adv_reg & all_mask) != all_mask)
2490                 return 0;
2491         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2492                 u32 tg3_ctrl;
2493
2494                 all_mask = 0;
2495                 if (mask & ADVERTISED_1000baseT_Half)
2496                         all_mask |= ADVERTISE_1000HALF;
2497                 if (mask & ADVERTISED_1000baseT_Full)
2498                         all_mask |= ADVERTISE_1000FULL;
2499
2500                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2501                         return 0;
2502
2503                 if ((tg3_ctrl & all_mask) != all_mask)
2504                         return 0;
2505         }
2506         return 1;
2507 }
2508
2509 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2510 {
2511         u32 curadv, reqadv;
2512
2513         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2514                 return 1;
2515
2516         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2517         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2518
2519         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2520                 if (curadv != reqadv)
2521                         return 0;
2522
2523                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2524                         tg3_readphy(tp, MII_LPA, rmtadv);
2525         } else {
2526                 /* Reprogram the advertisement register, even if it
2527                  * does not affect the current link.  If the link
2528                  * gets renegotiated in the future, we can save an
2529                  * additional renegotiation cycle by advertising
2530                  * it correctly in the first place.
2531                  */
2532                 if (curadv != reqadv) {
2533                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2534                                      ADVERTISE_PAUSE_ASYM);
2535                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2536                 }
2537         }
2538
2539         return 1;
2540 }
2541
/* Bring up or renegotiate the link on a copper PHY.
 *
 * Acks latched MAC link-state status, optionally resets the PHY
 * (forced, or when a 5703/5704/5705 lost link), applies per-chip PHY
 * workarounds, polls the PHY for link/speed/duplex, programs the MAC
 * mode register to match, and finally updates the netdev carrier state.
 *
 * @tp:          device state
 * @force_reset: non-zero to unconditionally reset the PHY first
 *
 * Returns 0; the only propagated failures come from the 5401 PHY
 * reset/DSP-init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and ack any latched link-state status bits
	 * before touching the PHY.
	 */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* MDIO auto-polling must be off while we issue manual
	 * tg3_readphy/tg3_writephy accesses below.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		/* Link down on a 5401: reload the DSP fixups and wait
		 * up to ~10ms for link to return.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit: if link still did not
			 * come back, reset and reload the DSP once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt when using MI
	 * interrupts; otherwise mask everything (except on 5906).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL (selected via the 0x4007
		 * write) is set; if it was clear, set it and redo link
		 * bring-up from scratch.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to ~4ms for link (double read: BMSR is latched). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait up to ~20ms for the aux status register to
		 * report a non-zero value.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait until BMCR reads back as something other than
		 * 0 or 0x7fff (presumably not-ready values — NOTE:
		 * inherited magic, confirm against PHY docs).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link is only accepted if autoneg is on,
			 * everything requested is advertised, and the
			 * pause advertisement is consistent.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is only good if the PHY
			 * matches the requested speed/duplex/flowctrl
			 * exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Kick off a fresh (re)negotiation and re-check link. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated link:
	 * MII for 10/100, GMII otherwise (and when link is down).
	 */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Signal a gigabit link to the firmware mailbox on 5700 in
	 * PCI-X or high-speed-PCI configurations.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any carrier change to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2800
/* Software state for the 1000BASE-X (fiber) auto-negotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and link-partner status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks: cur_time is incremented on
	 * every tg3_fiber_aneg_smachine() call, link_time records when
	 * the current state was entered (for settle-time checks).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* rx config word being debounced */
	int ability_match_count;	/* consecutive identical samples */

	/* Debounced single-bit match results derived from MAC_RX_AUTO_NEG. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2858 #define ANEG_OK         0
2859 #define ANEG_DONE       1
2860 #define ANEG_TIMER_ENAB 2
2861 #define ANEG_FAILED     -1
2862
2863 #define ANEG_STATE_SETTLE_TIME  10000
2864
/* Run one tick of the software 1000BASE-X auto-negotiation state
 * machine for fiber links: sample the received config word from the
 * MAC, debounce it, then advance @ap->state.
 *
 * @tp: device state
 * @ap: negotiation state; a state of ANEG_STATE_UNKNOWN (re)initializes
 *
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick (or restart): start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and debounce it: two
	 * consecutive identical non-initial samples set ability_match.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words being received => link partner idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Autoneg requested: reset counters and restart. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable non-zero config word from the peer. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the peer's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Peer acked; its word (ACK bit aside) must still
			 * match what we debounced, else renegotiate.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject config words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008 — NOTE(review): inherited magic with no
		 * ANEG_CFG_* name; appears to be the received toggle
		 * bit — confirm against the spec.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		/* Peer went back to idle words: restart negotiation. */
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only proceed if neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and let the link settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3118
3119 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3120 {
3121         int res = 0;
3122         struct tg3_fiber_aneginfo aninfo;
3123         int status = ANEG_FAILED;
3124         unsigned int tick;
3125         u32 tmp;
3126
3127         tw32_f(MAC_TX_AUTO_NEG, 0);
3128
3129         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3130         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3131         udelay(40);
3132
3133         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3134         udelay(40);
3135
3136         memset(&aninfo, 0, sizeof(aninfo));
3137         aninfo.flags |= MR_AN_ENABLE;
3138         aninfo.state = ANEG_STATE_UNKNOWN;
3139         aninfo.cur_time = 0;
3140         tick = 0;
3141         while (++tick < 195000) {
3142                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3143                 if (status == ANEG_DONE || status == ANEG_FAILED)
3144                         break;
3145
3146                 udelay(1);
3147         }
3148
3149         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3150         tw32_f(MAC_MODE, tp->mac_mode);
3151         udelay(40);
3152
3153         *txflags = aninfo.txconfig;
3154         *rxflags = aninfo.flags;
3155
3156         if (status == ANEG_DONE &&
3157             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3158                              MR_LP_ADV_FULL_DUPLEX)))
3159                 res = 1;
3160
3161         return res;
3162 }
3163
/* One-time bring-up of the BCM8002 SERDES PHY.
 *
 * Performs a software reset followed by a fixed sequence of writes to
 * vendor-specific PHY registers (0x10/0x11/0x13/0x16/0x18) to program
 * the PLL lock range, auto-lock/comdet and POR.  NOTE(review): the
 * register numbers and values are not publicly documented; presumably
 * they follow Broadcom reference code — do not reorder.
 *
 * Skipped when the device is already initialized and there is no PCS
 * sync (nothing to bring up in that case).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete (~5 ms busy-wait). */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize (~150 ms busy-wait). */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
3213
/* Fiber link setup using the hardware SG_DIG autonegotiation engine.
 *
 * @mac_status: MAC_STATUS snapshot taken by the caller.
 *
 * Programs SG_DIG_CTRL with the desired autoneg/pause advertisement,
 * restarting hardware autoneg when the current control value differs
 * from the expected one, and falls back to parallel detection when
 * autoneg does not complete before tp->serdes_counter expires.
 *
 * Returns 1 if the link came up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* The SERDES-config workaround applies to everything except the
         * two earliest 5704 revisions.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        /* Forced mode: tear down hardware autoneg if it is active,
         * then report link purely from PCS sync.
         */
        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* If we were parallel-detected and still see PCS sync
                 * without config words, keep the link up while the
                 * counter runs instead of restarting autoneg.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse SOFT_RESET while writing the new control word. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg finished: derive pause settings from
                         * what we advertised and what the partner sent.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        /* Autoneg still pending: count down, then try
                         * parallel detection once the timeout expires.
                         */
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: rearm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
3355
/* Fiber link setup without the hardware autoneg engine.
 *
 * With autoneg enabled, runs the software state machine via
 * fiber_autoneg() and translates the resulting tx/rx config words into
 * pause settings; as a fallback, PCS sync without received config words
 * is also treated as link-up.  With autoneg disabled, flow control is
 * cleared and a 1000FD link is forced by pulsing SEND_CONFIGS.
 *
 * Requires PCS sync to report link at all.  Returns 1 if link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack sync/config change events until they stop
                 * re-asserting (bounded at 30 tries).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed but we still have sync and no config
                 * words: treat as link-up (parallel-detect style).
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
3417
/* Link setup entry point for TBI/fiber ports.
 *
 * Switches the MAC into TBI port mode, initializes the BCM8002 PHY if
 * present, then delegates to the hardware or software autoneg path.
 * Afterwards it drains status-change events, updates active
 * speed/duplex, drives the link LED, and synchronizes the net-device
 * carrier state, reporting link changes via tg3_link_report().
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Remember current settings so we can report a change even if
         * the carrier state itself does not flip.
         */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: software autoneg, link already up and clean
         * (sync + signal, no pending config) — just ack events.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the pending link-change bit in the status block. */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack status-change events until they stay clear (100 tries). */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* Autoneg timed out: nudge the partner by briefly
                 * re-sending config code words.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Fiber links are always 1000FD when up; drive the LED too. */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                /* Carrier unchanged, but report if pause/speed/duplex
                 * settings moved underneath it.
                 */
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
3525
/* Link setup for SERDES parts controlled through an MII interface
 * (5714S-style), as opposed to the TBI path in tg3_setup_fiber_phy().
 *
 * Programs the 1000BASE-X advertisement and BMCR over MII, handles
 * forced-duplex mode (with an explicit link-down transition when the
 * duplex changes), derives the resulting speed/duplex and flow
 * control, and synchronizes MAC mode and carrier state.
 *
 * Returns the OR of all tg3_readphy() error codes encountered.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;
        u32 local_adv, remote_adv;

        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        /* Ack any stale status-change events before probing. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* Read BMSR twice: link status is latched, the second read
         * presumably reflects the current state — standard MII idiom.
         */
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                /* On 5714, trust the MAC's TX status for link state. */
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;

                /* Rebuild the 1000BASE-X advertisement from the
                 * configured capabilities and flow-control policy.
                 */
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                  ADVERTISE_1000XPAUSE |
                                  ADVERTISE_1000XPSE_ASYM |
                                  ADVERTISE_SLCT);

                new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

                if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                        new_adv |= ADVERTISE_1000XHALF;
                if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                        new_adv |= ADVERTISE_1000XFULL;

                if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        /* Advertisement changed (or autoneg was off):
                         * restart autoneg and return early; the link
                         * will be evaluated on a later poll.
                         */
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                        return err;
                }
        } else {
                u32 new_bmcr;

                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        /* Re-read latched BMSR after reprogramming. */
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
        }

        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                local_adv = 0;
                remote_adv = 0;

                if (bmcr & BMCR_ANENABLE) {
                        u32 common;

                        /* Resolve duplex from the intersection of both
                         * sides' advertisements; no common 1000X
                         * ability means no usable link.
                         */
                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
                        }
                        else
                                current_link_up = 0;
                }
        }

        /* Flow control only applies on full-duplex links. */
        if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
                tg3_setup_flow_control(tp, local_adv, remote_adv);

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}
3695
/* Periodic parallel-detection helper for MII SERDES ports.
 *
 * Called while tp->serdes_counter counts down the autoneg window.
 * Once it expires: if the link is still down but we detect signal
 * without incoming config code words, force 1000FD (parallel
 * detection); conversely, if a parallel-detected link later starts
 * receiving config words, re-enable autoneg.
 *
 * Registers 0x15/0x17/0x1c are shadow/expansion PHY registers;
 * the 0x7c00 / 0x0f01 selectors and the 0x10/0x20 status bits are
 * vendor-specific (NOTE(review): not publicly documented).
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }
        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, 0x1c, 0x7c00);
                        tg3_readphy(tp, 0x1c, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, 0x17, 0x0f01);
                        tg3_readphy(tp, 0x15, &phy2);
                        tg3_readphy(tp, 0x15, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
                        }
                }
        }
        else if (netif_carrier_ok(tp->dev) &&
                 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, 0x17, 0x0f01);
                tg3_readphy(tp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                }
        }
}
3753
3754 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3755 {
3756         int err;
3757
3758         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3759                 err = tg3_setup_fiber_phy(tp, force_reset);
3760         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3761                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3762         } else {
3763                 err = tg3_setup_copper_phy(tp, force_reset);
3764         }
3765
3766         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3767             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3768                 u32 val, scale;
3769
3770                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3771                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3772                         scale = 65;
3773                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3774                         scale = 6;
3775                 else
3776                         scale = 12;
3777
3778                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3779                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3780                 tw32(GRC_MISC_CFG, val);
3781         }
3782
3783         if (tp->link_config.active_speed == SPEED_1000 &&
3784             tp->link_config.active_duplex == DUPLEX_HALF)
3785                 tw32(MAC_TX_LENGTHS,
3786                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3787                       (6 << TX_LENGTHS_IPG_SHIFT) |
3788                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3789         else
3790                 tw32(MAC_TX_LENGTHS,
3791                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3792                       (6 << TX_LENGTHS_IPG_SHIFT) |
3793                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3794
3795         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3796                 if (netif_carrier_ok(tp->dev)) {
3797                         tw32(HOSTCC_STAT_COAL_TICKS,
3798                              tp->coal.stats_block_coalesce_usecs);
3799                 } else {
3800                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3801                 }
3802         }
3803
3804         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3805                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3806                 if (!netif_carrier_ok(tp->dev))
3807                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3808                               tp->pwrmgmt_thresh;
3809                 else
3810                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3811                 tw32(PCIE_PWR_MGMT_THRESH, val);
3812         }
3813
3814         return err;
3815 }
3816
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 *
 * The BUG_ON guards against reaching this path when the reorder
 * workaround is already active or indirect mailbox writes are in use —
 * in either case a bogus completion would indicate a different bug.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* Flag the pending recovery under tp->lock; the reset itself
         * happens later from the workqueue.
         */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
3837
3838 static inline u32 tg3_tx_avail(struct tg3 *tp)
3839 {
3840         smp_mb();
3841         return (tp->tx_pending -
3842                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3843 }
3844
/* Reclaim completed TX descriptors up to the hardware consumer index.
 *
 * Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does: each completed packet's
 * head slot plus its fragment slots can be unmapped and freed as a
 * unit.  A NULL skb at a head slot, or a fragment slot that is
 * occupied or runs past hw_idx, indicates a bogus completion and is
 * handed to tg3_tx_recover().
 */
static void tg3_tx(struct tg3 *tp)
{
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                /* Unmap the head and all fragment DMA mappings at once. */
                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Skip over the fragment slots; they must be empty and
                 * must not straddle hw_idx mid-packet.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;
                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                netif_tx_lock(tp->dev);
                /* Re-check under the TX lock so we don't race with the
                 * transmit path stopping the queue again.
                 */
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
3903
3904 /* Returns size of skb allocated or < 0 on error.
3905  *
3906  * We only need to fill in the address because the other members
3907  * of the RX descriptor are invariant, see tg3_init_rings.
3908  *
3909  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3910  * posting buffers we only dirty the first cache line of the RX
3911  * descriptor (containing the address).  Whereas for the RX status
3912  * buffers the cpu only reads the last cacheline of the RX descriptor
3913  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3914  */
3915 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3916                             int src_idx, u32 dest_idx_unmasked)
3917 {
3918         struct tg3_rx_buffer_desc *desc;
3919         struct ring_info *map, *src_map;
3920         struct sk_buff *skb;
3921         dma_addr_t mapping;
3922         int skb_size, dest_idx;
3923
3924         src_map = NULL;
3925         switch (opaque_key) {
3926         case RXD_OPAQUE_RING_STD:
3927                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3928                 desc = &tp->rx_std[dest_idx];
3929                 map = &tp->rx_std_buffers[dest_idx];
3930                 if (src_idx >= 0)
3931                         src_map = &tp->rx_std_buffers[src_idx];
3932                 skb_size = tp->rx_pkt_buf_sz;
3933                 break;
3934
3935         case RXD_OPAQUE_RING_JUMBO:
3936                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3937                 desc = &tp->rx_jumbo[dest_idx];
3938                 map = &tp->rx_jumbo_buffers[dest_idx];
3939                 if (src_idx >= 0)
3940                         src_map = &tp->rx_jumbo_buffers[src_idx];
3941                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3942                 break;
3943
3944         default:
3945                 return -EINVAL;
3946         }
3947
3948         /* Do not overwrite any of the map or rp information
3949          * until we are sure we can commit to a new buffer.
3950          *
3951          * Callers depend upon this behavior and assume that
3952          * we leave everything unchanged if we fail.
3953          */
3954         skb = netdev_alloc_skb(tp->dev, skb_size);
3955         if (skb == NULL)
3956                 return -ENOMEM;
3957
3958         skb_reserve(skb, tp->rx_offset);
3959
3960         mapping = pci_map_single(tp->pdev, skb->data,
3961                                  skb_size - tp->rx_offset,
3962                                  PCI_DMA_FROMDEVICE);
3963
3964         map->skb = skb;
3965         pci_unmap_addr_set(map, mapping, mapping);
3966
3967         if (src_map != NULL)
3968                 src_map->skb = NULL;
3969
3970         desc->addr_hi = ((u64)mapping >> 32);
3971         desc->addr_lo = ((u64)mapping & 0xffffffff);
3972
3973         return skb_size;
3974 }
3975
3976 /* We only need to move over in the address because the other
3977  * members of the RX descriptor are invariant.  See notes above
3978  * tg3_alloc_rx_skb for full details.
3979  */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	/* Resolve source and destination descriptor plus bookkeeping
	 * slots in the ring identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		/* Unknown ring cookie: nothing to recycle. */
		return;
	}

	/* Transfer skb ownership and the DMA mapping to the slot being
	 * reposted, then clear the source slot.
	 */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
4016
#if TG3_VLAN_TAG_USED
/* Deliver an skb whose VLAN tag was stripped by hardware to the
 * VLAN-aware receive path.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4023
4024 /* The RX ring scheme is composed of multiple rings which post fresh
4025  * buffers to the chip, and one special ring the chip uses to report
4026  * status back to the host.
4027  *
4028  * The special ring reports the status of received packets to the
4029  * host.  The chip does not write into the original descriptor the
4030  * RX buffer was obtained from.  The chip simply takes the original
4031  * descriptor as provided by the host, updates the status and length
4032  * field, then writes this into the next status ring entry.
4033  *
4034  * Each ring the host uses to post buffers to the chip is described
4035  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4036  * it is first placed into the on-chip ram.  When the packet's length
4037  * is known, it walks down the TG3_BDINFO entries to select the ring.
4038  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4039  * which is within the range of the new packet's length is chosen.
4040  *
4041  * The "separate ring for rx status" scheme may sound queer, but it makes
4042  * sense from a cache coherency perspective.  If only the host writes
4043  * to the buffer post rings, and only the chip writes to the rx status
4044  * rings, then cache lines never move beyond shared-modified state.
4045  * If both the host and chip were to write into the same ring, cache line
4046  * eviction could occur since both entities want it in an exclusive state.
4047  */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring the
		 * buffer was posted from and its index in that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring cookie: skip without reposting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames, except the odd-nibble MII
		 * indication which is tolerated on its own.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a replacement buffer and hand
			 * the original DMA buffer up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a fresh skb and
			 * recycle the large ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Only claim a verified checksum when the hardware
		 * ones-complement sum came out as 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Repost std buffers in batches so the chip is not
		 * starved of buffers during long polls.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
4203
/* One NAPI work pass: PHY events, TX completions, then RX up to budget. */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns link management; just ack
				 * the MAC status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() flags a pending reset on bogus completions;
		 * stop polling and let tg3_poll() schedule the reset.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4245
/* NAPI poll entry point. */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, NAPI will re-poll. */
		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		/* No more work: leave polling mode and re-arm interrupts. */
		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4286
/* Make any in-flight interrupt handler finish and future ones bail out
 * early: after this returns, tg3_irq_sync() reads true in the ISRs.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish the flag before waiting on the handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4296
/* Nonzero while interrupts are quiesced by tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4301
4302 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4303  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4304  * with as well.  Most of the time, this is not necessary except when
4305  * shutting down the device.
4306  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Optionally also wait out any ISR currently executing. */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4313
/* Release the lock taken by tg3_full_lock().  Note this does NOT undo
 * irq_sync; that is cleared where the device is restarted.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4318
4319 /* One-shot MSI handler - Chip automatically disables interrupt
4320  * after sending MSI so driver doesn't have to do it.
4321  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* No mailbox ack needed here (see comment above); just avoid
	 * scheduling NAPI while tg3_irq_quiesce() is in force.
	 */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4335
4336 /* MSI ISR - No need to check for interrupt sharing and no need to
4337  * flush status block and interrupt mailbox. PCI ordering rules
4338  * guarantee that MSI will arrive after the status block.
4339  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip scheduling NAPI while tg3_irq_quiesce() is in force. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4360
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not ours (shared line) or chip is resetting. */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4409
/* INTx interrupt handler for chips using tagged status blocks. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not ours (shared line) or chip is resetting. */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4457
4458 /* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip still asserts INTA, and mask further interrupts so the
	 * test sees exactly one.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4472
4473 static int tg3_init_hw(struct tg3 *, int);
4474 static int tg3_halt(struct tg3 *, int, int);
4475
4476 /* Restart hardware after configuration changes, self-test, etc.
4477  * Invoked with tp->lock held.
4478  */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the device.
		 * dev_close() cannot be called with tp->lock held, so
		 * drop and re-take it around the call (hence the
		 * __releases/__acquires annotations above).
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4499
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly by hand. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4508
/* Deferred full chip reset (TX timeouts, tg3_tx_recover(), etc.).
 * Runs in process context via schedule_work().
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been closed while this work was queued. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock with IRQ quiescing before touching the chip. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* TX recovery (see tg3_tx_recover()): switch to reorder-safe
	 * mailbox write routines before bringing the chip back up.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4556
/* Dump a few key MAC/DMA status registers to aid TX-timeout debugging. */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4564
/* net_device watchdog hook: log state and schedule a full chip reset. */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* The reset itself must run in process context. */
	schedule_work(&tp->reset_task);
}
4577
4578 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4579 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4580 {
4581         u32 base = (u32) mapping & 0xffffffff;
4582
4583         return ((base > 0xffffdcc0) &&
4584                 (base + len + 8 < base));
4585 }
4586
4587 /* Test for DMA addresses > 40-bit */
4588 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4589                                           int len)
4590 {
4591 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4592         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4593                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4594         return 0;
4595 #else
4596         return 0;
4597 #endif
4598 }
4599
4600 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4601
4602 /* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	/* Linearize the packet into a freshly allocated skb; on 5701
	 * also re-align the data to a 4-byte boundary.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* One descriptor covers the whole linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		/* Slot 0 takes ownership of the (possibly NULL)
		 * replacement skb; the remaining slots of the old
		 * multi-descriptor layout are cleared.
		 */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* Release the DMA mappings of the original skb and free it. */
	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4661
4662 static void tg3_set_txd(struct tg3 *tp, int entry,
4663                         dma_addr_t mapping, int len, u32 flags,
4664                         u32 mss_and_is_end)
4665 {
4666         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4667         int is_end = (mss_and_is_end & 0x1);
4668         u32 mss = (mss_and_is_end >> 1);
4669         u32 vlan_tag = 0;
4670
4671         if (is_end)
4672                 flags |= TXD_FLAG_END;
4673         if (flags & TXD_FLAG_VLAN) {
4674                 vlan_tag = flags >> 16;
4675                 flags &= 0xffff;
4676         }
4677         vlan_tag |= (mss << TXD_MSS_SHIFT);
4678
4679         txd->addr_hi = ((u64) mapping >> 32);
4680         txd->addr_lo = ((u64) mapping & 0xffffffff);
4681         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4682         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4683 }
4684
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb (head plus page fragments) with skb_dma_map(), writes
 * one TX descriptor per segment, then kicks the NIC via the producer
 * mailbox.  Returns NETDEV_TX_OK (even when the packet is dropped on a
 * mapping/expansion failure) or NETDEV_TX_BUSY when the ring is full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;	/* NOTE: dead store — immediately reassigned below */
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO needs a private, writable header. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Hardware TSO wants the L3+L4 header length encoded in
		 * the upper bits of the mss field.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Chip recomputes these per segment; pre-seed
			 * tot_len to one full segment.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Map head and all fragments in one call; drop on failure. */
	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];

	/* Only the first slot of a packet records the skb pointer. */
	tp->tx_buffers[entry].skb = skb;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];
			tp->tx_buffers[entry].skb = NULL;

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check under the stopped queue to close the race
		 * with the reclaim path waking us.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4804
4805 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4806
4807 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4808  * TSO header is greater than 80 bytes.
4809  */
4810 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4811 {
4812         struct sk_buff *segs, *nskb;
4813
4814         /* Estimate the number of fragments in the worst case */
4815         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4816                 netif_stop_queue(tp->dev);
4817                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4818                         return NETDEV_TX_BUSY;
4819
4820                 netif_wake_queue(tp->dev);
4821         }
4822
4823         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4824         if (IS_ERR(segs))
4825                 goto tg3_tso_bug_end;
4826
4827         do {
4828                 nskb = segs;
4829                 segs = segs->next;
4830                 nskb->next = NULL;
4831                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4832         } while (segs);
4833
4834 tg3_tso_bug_end:
4835         dev_kfree_skb(skb);
4836
4837         return NETDEV_TX_OK;
4838 }
4839
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but additionally checks every DMA address
 * against the 4G-crossing and 40-bit errata; when a mapping would trip
 * one, the packet is re-emitted from a bounce copy via
 * tigon3_dma_hwbug_workaround().  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	int would_hit_hwbug;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;	/* NOTE: dead store — immediately reassigned below */
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO needs a private, writable header. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		hdr_len = ip_tcp_len + tcp_opt_len;
		/* Headers over 80 bytes trip the TSO bug on some chips;
		 * fall back to software GSO for those.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO: seed the TCP pseudo-header csum. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Header-option length is encoded differently per chip
		 * generation: in the mss field (HW TSO / 5705) or in
		 * base_flags (firmware TSO on other chips).
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Map head and all fragments in one call; drop on failure. */
	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	would_hit_hwbug = 0;

	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];

			tp->tx_buffers[entry].skb = NULL;

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet; the
		 * workaround re-emits the data from a bounce copy.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the wake race. */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
5017
5018 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5019                                int new_mtu)
5020 {
5021         dev->mtu = new_mtu;
5022
5023         if (new_mtu > ETH_DATA_LEN) {
5024                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5025                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5026                         ethtool_op_set_tso(dev, 0);
5027                 }
5028                 else
5029                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5030         } else {
5031                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5032                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5033                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5034         }
5035 }
5036
/* Change the device MTU.
 *
 * If the interface is down the new value is only recorded via
 * tg3_set_mtu(); if it is up, the PHY and data path are quiesced, the
 * chip is halted and restarted with the new configuration.  Returns 0
 * or a negative errno (-EINVAL for an out-of-range MTU, or the
 * tg3_restart_hw() error).
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce PHY and data path before reconfiguring the chip. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* Only resume the data path if the restart succeeded. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
5075
5076 /* Free up pending packets in all rx/tx rings.
5077  *
5078  * The chip has been shut down and the driver detached from
5079  * the networking, so no interrupts or new tx packets will
5080  * end up in the driver.  tp->{tx,}lock is not held and we are not
5081  * in an interrupt context and thus may sleep.
5082  */
5083 static void tg3_free_rings(struct tg3 *tp)
5084 {
5085         struct ring_info *rxp;
5086         int i;
5087
5088         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5089                 rxp = &tp->rx_std_buffers[i];
5090
5091                 if (rxp->skb == NULL)
5092                         continue;
5093                 pci_unmap_single(tp->pdev,
5094                                  pci_unmap_addr(rxp, mapping),
5095                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5096                                  PCI_DMA_FROMDEVICE);
5097                 dev_kfree_skb_any(rxp->skb);
5098                 rxp->skb = NULL;
5099         }
5100
5101         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5102                 rxp = &tp->rx_jumbo_buffers[i];
5103
5104                 if (rxp->skb == NULL)
5105                         continue;
5106                 pci_unmap_single(tp->pdev,
5107                                  pci_unmap_addr(rxp, mapping),
5108                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5109                                  PCI_DMA_FROMDEVICE);
5110                 dev_kfree_skb_any(rxp->skb);
5111                 rxp->skb = NULL;
5112         }
5113
5114         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5115                 struct tx_ring_info *txp;
5116                 struct sk_buff *skb;
5117
5118                 txp = &tp->tx_buffers[i];
5119                 skb = txp->skb;
5120
5121                 if (skb == NULL) {
5122                         i++;
5123                         continue;
5124                 }
5125
5126                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5127
5128                 txp->skb = NULL;
5129
5130                 i += skb_shinfo(skb)->nr_frags + 1;
5131
5132                 dev_kfree_skb_any(skb);
5133         }
5134 }
5135
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0, or -ENOMEM when not even one RX buffer could be
 * allocated.  A partially-populated ring is accepted: rx_pending /
 * rx_jumbo_pending are shrunk to the number of buffers obtained.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* On 5780-class chips with a jumbo MTU, the standard-ring
	 * buffers are sized for jumbo frames.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			/* A smaller std ring still works; zero buffers
			 * is fatal.
			 */
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Release the std-ring buffers
					 * allocated above as well.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
5225
5226 /*
5227  * Must not be invoked with interrupt sources disabled and
5228  * the hardware shutdown down.
5229  */
5230 static void tg3_free_consistent(struct tg3 *tp)
5231 {
5232         kfree(tp->rx_std_buffers);
5233         tp->rx_std_buffers = NULL;
5234         if (tp->rx_std) {
5235                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5236                                     tp->rx_std, tp->rx_std_mapping);
5237                 tp->rx_std = NULL;
5238         }
5239         if (tp->rx_jumbo) {
5240                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5241                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5242                 tp->rx_jumbo = NULL;
5243         }
5244         if (tp->rx_rcb) {
5245                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5246                                     tp->rx_rcb, tp->rx_rcb_mapping);
5247                 tp->rx_rcb = NULL;
5248         }
5249         if (tp->tx_ring) {
5250                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5251                         tp->tx_ring, tp->tx_desc_mapping);
5252                 tp->tx_ring = NULL;
5253         }
5254         if (tp->hw_status) {
5255                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5256                                     tp->hw_status, tp->status_mapping);
5257                 tp->hw_status = NULL;
5258         }
5259         if (tp->hw_stats) {
5260                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5261                                     tp->hw_stats, tp->stats_mapping);
5262                 tp->hw_stats = NULL;
5263         }
5264 }
5265
5266 /*
5267  * Must not be invoked with interrupt sources disabled and
5268  * the hardware shutdown down.  Can sleep.
5269  */
5270 static int tg3_alloc_consistent(struct tg3 *tp)
5271 {
5272         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5273                                       (TG3_RX_RING_SIZE +
5274                                        TG3_RX_JUMBO_RING_SIZE)) +
5275                                      (sizeof(struct tx_ring_info) *
5276                                       TG3_TX_RING_SIZE),
5277                                      GFP_KERNEL);
5278         if (!tp->rx_std_buffers)
5279                 return -ENOMEM;
5280
5281         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5282         tp->tx_buffers = (struct tx_ring_info *)
5283                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5284
5285         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5286                                           &tp->rx_std_mapping);
5287         if (!tp->rx_std)
5288                 goto err_out;
5289
5290         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5291                                             &tp->rx_jumbo_mapping);
5292
5293         if (!tp->rx_jumbo)
5294                 goto err_out;
5295
5296         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5297                                           &tp->rx_rcb_mapping);
5298         if (!tp->rx_rcb)
5299                 goto err_out;
5300
5301         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5302                                            &tp->tx_desc_mapping);
5303         if (!tp->tx_ring)
5304                 goto err_out;
5305
5306         tp->hw_status = pci_alloc_consistent(tp->pdev,
5307                                              TG3_HW_STATUS_SIZE,
5308                                              &tp->status_mapping);
5309         if (!tp->hw_status)
5310                 goto err_out;
5311
5312         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5313                                             sizeof(struct tg3_hw_stats),
5314                                             &tp->stats_mapping);
5315         if (!tp->hw_stats)
5316                 goto err_out;
5317
5318         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5319         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5320
5321         return 0;
5322
5323 err_out:
5324         tg3_free_consistent(tp);
5325         return -ENOMEM;
5326 }
5327
5328 #define MAX_WAIT_CNT 1000
5329
5330 /* To stop a block, clear the enable bit and poll till it
5331  * clears.  tp->lock is held.
5332  */
5333 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5334 {
5335         unsigned int i;
5336         u32 val;
5337
5338         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5339                 switch (ofs) {
5340                 case RCVLSC_MODE:
5341                 case DMAC_MODE:
5342                 case MBFREE_MODE:
5343                 case BUFMGR_MODE:
5344                 case MEMARB_MODE:
5345                         /* We can't enable/disable these bits of the
5346                          * 5705/5750, just say success.
5347                          */
5348                         return 0;
5349
5350                 default:
5351                         break;
5352                 }
5353         }
5354
5355         val = tr32(ofs);
5356         val &= ~enable_bit;
5357         tw32_f(ofs, val);
5358
5359         for (i = 0; i < MAX_WAIT_CNT; i++) {
5360                 udelay(100);
5361                 val = tr32(ofs);
5362                 if ((val & enable_bit) == 0)
5363                         break;
5364         }
5365
5366         if (i == MAX_WAIT_CNT && !silent) {
5367                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5368                        "ofs=%lx enable_bit=%x\n",
5369                        ofs, enable_bit);
5370                 return -ENODEV;
5371         }
5372
5373         return 0;
5374 }
5375
/* tp->lock is held.
 *
 * Stop all RX, TX and DMA engine blocks and clear the host-visible
 * status/statistics blocks.  Stop failures are OR-ed into @err so one
 * stuck block does not abort the rest of the shutdown; the combined
 * value (0 or negative) is returned.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new traffic arrives. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* RX-side blocks... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then TX and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll for the transmit MAC to quiesce. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host status and statistics blocks if present. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5438
5439 /* tp->lock is held. */
5440 static int tg3_nvram_lock(struct tg3 *tp)
5441 {
5442         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5443                 int i;
5444
5445                 if (tp->nvram_lock_cnt == 0) {
5446                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5447                         for (i = 0; i < 8000; i++) {
5448                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5449                                         break;
5450                                 udelay(20);
5451                         }
5452                         if (i == 8000) {
5453                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5454                                 return -ENODEV;
5455                         }
5456                 }
5457                 tp->nvram_lock_cnt++;
5458         }
5459         return 0;
5460 }
5461
5462 /* tp->lock is held. */
5463 static void tg3_nvram_unlock(struct tg3 *tp)
5464 {
5465         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5466                 if (tp->nvram_lock_cnt > 0)
5467                         tp->nvram_lock_cnt--;
5468                 if (tp->nvram_lock_cnt == 0)
5469                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5470         }
5471 }
5472
5473 /* tp->lock is held. */
5474 static void tg3_enable_nvram_access(struct tg3 *tp)
5475 {
5476         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5477             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5478                 u32 nvaccess = tr32(NVRAM_ACCESS);
5479
5480                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5481         }
5482 }
5483
5484 /* tp->lock is held. */
5485 static void tg3_disable_nvram_access(struct tg3 *tp)
5486 {
5487         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5488             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5489                 u32 nvaccess = tr32(NVRAM_ACCESS);
5490
5491                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5492         }
5493 }
5494
/* Post an event to the APE management firmware.  Silently returns
 * unless APE shared memory carries the expected signature and the
 * APE firmware reports itself ready.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Only queue our event once the previous one is no longer
		 * pending; the status write happens while holding the APE
		 * memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if we actually posted our event above
	 * (i.e. the pending bit was observed clear).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5530
/* Notify the APE management firmware of a driver state change
 * (start/unload/suspend) by sending the matching APE event.
 * No-op unless the APE is enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (kind) {
		case RESET_KIND_INIT:
			/* Publish the host segment signature/length, bump
			 * the init counter, and identify the driver before
			 * signalling the start event.
			 */
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
					APE_HOST_SEG_SIG_MAGIC);
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
					APE_HOST_SEG_LEN_MAGIC);
			apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
			tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
			tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
					APE_HOST_DRIVER_ID_MAGIC);
			tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
					APE_HOST_BEHAV_NO_PHYLOCK);

			event = APE_EVENT_STATUS_STATE_START;
			break;
		case RESET_KIND_SHUTDOWN:
			event = APE_EVENT_STATUS_STATE_UNLOAD;
			break;
		case RESET_KIND_SUSPEND:
			event = APE_EVENT_STATUS_STATE_SUSPEND;
			break;
		default:
			/* Unknown reset kinds generate no APE event. */
			return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
5568
5569 /* tp->lock is held. */
5570 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5571 {
5572         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5573                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5574
5575         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5576                 switch (kind) {
5577                 case RESET_KIND_INIT:
5578                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5579                                       DRV_STATE_START);
5580                         break;
5581
5582                 case RESET_KIND_SHUTDOWN:
5583                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5584                                       DRV_STATE_UNLOAD);
5585                         break;
5586
5587                 case RESET_KIND_SUSPEND:
5588                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5589                                       DRV_STATE_SUSPEND);
5590                         break;
5591
5592                 default:
5593                         break;
5594                 }
5595         }
5596
5597         if (kind == RESET_KIND_INIT ||
5598             kind == RESET_KIND_SUSPEND)
5599                 tg3_ape_driver_state_change(tp, kind);
5600 }
5601
5602 /* tp->lock is held. */
5603 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5604 {
5605         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5606                 switch (kind) {
5607                 case RESET_KIND_INIT:
5608                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5609                                       DRV_STATE_START_DONE);
5610                         break;
5611
5612                 case RESET_KIND_SHUTDOWN:
5613                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5614                                       DRV_STATE_UNLOAD_DONE);
5615                         break;
5616
5617                 default:
5618                         break;
5619                 }
5620         }
5621
5622         if (kind == RESET_KIND_SHUTDOWN)
5623                 tg3_ape_driver_state_change(tp, kind);
5624 }
5625
5626 /* tp->lock is held. */
5627 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5628 {
5629         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5630                 switch (kind) {
5631                 case RESET_KIND_INIT:
5632                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5633                                       DRV_STATE_START);
5634                         break;
5635
5636                 case RESET_KIND_SHUTDOWN:
5637                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5638                                       DRV_STATE_UNLOAD);
5639                         break;
5640
5641                 case RESET_KIND_SUSPEND:
5642                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5643                                       DRV_STATE_SUSPEND);
5644                         break;
5645
5646                 default:
5647                         break;
5648                 }
5649         }
5650 }
5651
/* Poll for firmware/boot-code completion after a chip reset.
 * Returns 0 on success (or when no firmware appears to be fitted);
 * -ENODEV only if a 5906's boot code never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	/* The loop exits (up to ~1s of polling) once the mailbox reads
	 * the bitwise complement of the magic written pre-reset.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5690
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear bits in the PCI command
	 * register (e.g. memory enable -- see tg3_chip_reset()); stash
	 * it so tg3_restore_pci_state() can put it back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5696
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get their read request size restored; conventional
	 * PCI parts get cache line size and latency timer rewritten.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5758
5759 static void tg3_stop_fw(struct tg3 *);
5760
/* tp->lock is held. */
/* Perform a full GRC core-clock reset of the chip and bring the basic
 * interfaces (PCI config space, memory arbiter, MAC port mode) back
 * up.  The statement order here is load-bearing throughout.  Returns
 * 0 on success, or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5964
5965 /* tp->lock is held. */
5966 static void tg3_stop_fw(struct tg3 *tp)
5967 {
5968         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5969            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5970                 /* Wait for RX cpu to ACK the previous event. */
5971                 tg3_wait_for_event_ack(tp);
5972
5973                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5974
5975                 tg3_generate_fw_event(tp);
5976
5977                 /* Wait for RX cpu to ACK this event. */
5978                 tg3_wait_for_event_ack(tp);
5979         }
5980 }
5981
/* tp->lock is held. */
/* Fully halt the chip: pause firmware, write the pre-reset signature,
 * quiesce the hardware, reset the chip, and write the post-reset
 * signatures expected by ASF/APE management firmware.
 *
 * @kind:   RESET_KIND_* value describing why the halt is happening.
 * @silent: passed to tg3_abort_hw() to suppress its error messages.
 *
 * Returns 0 on success, or the error from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* The post-reset signatures are written even when the reset
	 * itself failed, matching the original ordering.
	 */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* Collapsed the redundant "if (err) return err; return 0;"
	 * tail -- returning err directly is equivalent.
	 */
	return err;
}
6002
6003 #define TG3_FW_RELEASE_MAJOR    0x0
6004 #define TG3_FW_RELASE_MINOR     0x0
6005 #define TG3_FW_RELEASE_FIX      0x0
6006 #define TG3_FW_START_ADDR       0x08000000
6007 #define TG3_FW_TEXT_ADDR        0x08000000
6008 #define TG3_FW_TEXT_LEN         0x9c0
6009 #define TG3_FW_RODATA_ADDR      0x080009c0
6010 #define TG3_FW_RODATA_LEN       0x60
6011 #define TG3_FW_DATA_ADDR        0x08000a40
6012 #define TG3_FW_DATA_LEN         0x20
6013 #define TG3_FW_SBSS_ADDR        0x08000a60
6014 #define TG3_FW_SBSS_LEN         0xc
6015 #define TG3_FW_BSS_ADDR         0x08000a70
6016 #define TG3_FW_BSS_LEN          0x10
6017
/* Firmware text segment (machine-code words; see the copyright notice
 * at the top of this file -- derived from unpublished Broadcom code).
 * Opaque data: do not edit by hand.  Presumably loaded into chip
 * scratch memory via a struct fw_info / tg3_load_firmware_cpu() --
 * confirm at the call site, which lies outside this section.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6111
/* Firmware read-only data segment.  The words appear to be ASCII
 * string fragments used by the firmware (e.g. "SwEvent0", "UnknEvnt",
 * "fatalErr", "MainCpuB") -- opaque data, do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
6119
6120 #if 0 /* All zeros, don't eat up space with it. */
6121 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6122         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6123         0x00000000, 0x00000000, 0x00000000, 0x00000000
6124 };
6125 #endif
6126
6127 #define RX_CPU_SCRATCH_BASE     0x30000
6128 #define RX_CPU_SCRATCH_SIZE     0x04000
6129 #define TX_CPU_SCRATCH_BASE     0x34000
6130 #define TX_CPU_SCRATCH_SIZE     0x04000
6131
/* tp->lock is held. */
/* Halt the on-chip CPU selected by @offset (RX_CPU_BASE or TX_CPU_BASE).
 * Returns 0 on success, -ENODEV if the CPU never reports CPU_MODE_HALT.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        /* 5705-class parts have no separate TX CPU to halt. */
        BUG_ON(offset == TX_CPU_BASE &&
            (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                /* 5906 halts its VCPU through a GRC control bit instead of
                 * the CPU_MODE polling sequence below.
                 */
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                /* RX CPU only: issue one final flushed halt write and let
                 * it settle before returning.
                 */
                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        /* Loop exhausted without seeing the halt bit -> give up. */
        if (i >= 10000) {
                printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
                       "and %s CPU\n",
                       tp->dev->name,
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}
6179
/* Describes one firmware image to be downloaded into CPU scratch RAM.
 * Each *_base is the section's load address, *_len its byte length,
 * and *_data the section contents; a NULL data pointer means the
 * section is all zeros (tg3_load_firmware_cpu writes 0 in that case).
 */
struct fw_info {
        unsigned int text_base;         /* .text load address */
        unsigned int text_len;          /* .text length in bytes */
        const u32 *text_data;           /* .text words, or NULL for zeros */
        unsigned int rodata_base;       /* .rodata load address */
        unsigned int rodata_len;        /* .rodata length in bytes */
        const u32 *rodata_data;         /* .rodata words, or NULL for zeros */
        unsigned int data_base;         /* .data load address */
        unsigned int data_len;          /* .data length in bytes */
        const u32 *data_data;           /* .data words, or NULL for zeros */
};
6191
6192 /* tp->lock is held. */
6193 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6194                                  int cpu_scratch_size, struct fw_info *info)
6195 {
6196         int err, lock_err, i;
6197         void (*write_op)(struct tg3 *, u32, u32);
6198
6199         if (cpu_base == TX_CPU_BASE &&
6200             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6201                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6202                        "TX cpu firmware on %s which is 5705.\n",
6203                        tp->dev->name);
6204                 return -EINVAL;
6205         }
6206
6207         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6208                 write_op = tg3_write_mem;
6209         else
6210                 write_op = tg3_write_indirect_reg32;
6211
6212         /* It is possible that bootcode is still loading at this point.
6213          * Get the nvram lock first before halting the cpu.
6214          */
6215         lock_err = tg3_nvram_lock(tp);
6216         err = tg3_halt_cpu(tp, cpu_base);
6217         if (!lock_err)
6218                 tg3_nvram_unlock(tp);
6219         if (err)
6220                 goto out;
6221
6222         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6223                 write_op(tp, cpu_scratch_base + i, 0);
6224         tw32(cpu_base + CPU_STATE, 0xffffffff);
6225         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6226         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6227                 write_op(tp, (cpu_scratch_base +
6228                               (info->text_base & 0xffff) +
6229                               (i * sizeof(u32))),
6230                          (info->text_data ?
6231                           info->text_data[i] : 0));
6232         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6233                 write_op(tp, (cpu_scratch_base +
6234                               (info->rodata_base & 0xffff) +
6235                               (i * sizeof(u32))),
6236                          (info->rodata_data ?
6237                           info->rodata_data[i] : 0));
6238         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6239                 write_op(tp, (cpu_scratch_base +
6240                               (info->data_base & 0xffff) +
6241                               (i * sizeof(u32))),
6242                          (info->data_data ?
6243                           info->data_data[i] : 0));
6244
6245         err = 0;
6246
6247 out:
6248         return err;
6249 }
6250
6251 /* tp->lock is held. */
6252 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6253 {
6254         struct fw_info info;
6255         int err, i;
6256
6257         info.text_base = TG3_FW_TEXT_ADDR;
6258         info.text_len = TG3_FW_TEXT_LEN;
6259         info.text_data = &tg3FwText[0];
6260         info.rodata_base = TG3_FW_RODATA_ADDR;
6261         info.rodata_len = TG3_FW_RODATA_LEN;
6262         info.rodata_data = &tg3FwRodata[0];
6263         info.data_base = TG3_FW_DATA_ADDR;
6264         info.data_len = TG3_FW_DATA_LEN;
6265         info.data_data = NULL;
6266
6267         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6268                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6269                                     &info);
6270         if (err)
6271                 return err;
6272
6273         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6274                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6275                                     &info);
6276         if (err)
6277                 return err;
6278
6279         /* Now startup only the RX cpu. */
6280         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6281         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6282
6283         for (i = 0; i < 5; i++) {
6284                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6285                         break;
6286                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6287                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6288                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6289                 udelay(1000);
6290         }
6291         if (i >= 5) {
6292                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6293                        "to set RX CPU PC, is %08x should be %08x\n",
6294                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6295                        TG3_FW_TEXT_ADDR);
6296                 return -ENODEV;
6297         }
6298         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6299         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6300
6301         return 0;
6302 }
6303
6304
/* TSO firmware v1.6.0 image layout: load addresses and byte lengths of
 * each section of the embedded image below.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6     /* sic: "RELASE" typo kept, other code may reference it */
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
6319
/* TSO firmware .text segment (MIPS instruction words).  Opaque
 * Broadcom-generated image covered by the copyright notice at the top
 * of this file -- never hand-edit individual words.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
        0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
        0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
        0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
        0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
        0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
        0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
        0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
        0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
        0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
        0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
        0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
        0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
        0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
        0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
        0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
        0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
        0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
        0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
        0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
        0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
        0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
        0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
        0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
        0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
        0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
        0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
        0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
        0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
        0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
        0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
        0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
        0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
        0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
        0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
        0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
        0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
        0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
        0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
        0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
        0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
        0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
        0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
        0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
        0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
        0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
        0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
        0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
        0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
        0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
        0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
        0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
        0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
        0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
        0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
        0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
        0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
        0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
        0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
        0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
        0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
        0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
        0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
        0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
        0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
        0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
        0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
        0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
        0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
        0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
        0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
        0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
        0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
        0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
        0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
        0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
        0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
        0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
        0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
        0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
        0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
        0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
        0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
        0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
        0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
        0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
        0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
        0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
        0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
        0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
        0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
        0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
        0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
        0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
        0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
        0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
        0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
        0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
        0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
        0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
        0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
        0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
        0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
        0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
        0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
        0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
        0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
        0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
        0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
        0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
        0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
        0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
        0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
        0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
        0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
        0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
        0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
        0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
        0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
        0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
        0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
        0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
        0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
        0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
        0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
        0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
        0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
        0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
        0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
        0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
        0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
        0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
        0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
        0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
        0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
        0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
        0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
        0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
        0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
        0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
        0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
        0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
        0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
        0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
        0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
        0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
        0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
        0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
        0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
        0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
        0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
        0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
        0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
        0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
        0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
        0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
        0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
        0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
        0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
        0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
        0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
        0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
        0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
        0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
        0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
        0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
        0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
        0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
        0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
        0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
        0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
        0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
        0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
        0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
        0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
        0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
        0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
        0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
        0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
        0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
        0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
        0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
        0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
        0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
        0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
        0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
        0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
        0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
        0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
        0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
        0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
        0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
        0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
        0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
        0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
        0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
        0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
        0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
        0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
        0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
        0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
        0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
        0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
        0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
        0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
        0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
        0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
        0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
        0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
        0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
        0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
        0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
        0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
        0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
        0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
        0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
        0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
        0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
        0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
        0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
        0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
        0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
        0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
        0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
        0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
        0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
        0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
        0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
        0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
        0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
        0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
        0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
        0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
        0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
        0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
        0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
        0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
        0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
        0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
        0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
        0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
        0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
        0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
        0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
        0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
        0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
        0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
        0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
        0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
        0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
        0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
        0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
        0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
        0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
        0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
        0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
        0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
        0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
        0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
        0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
        0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
        0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
        0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
        0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
        0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
        0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
        0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
        0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
        0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
        0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
        0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
        0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
        0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
        0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
        0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
        0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
        0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
        0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
6606
/* TSO firmware .rodata: NUL-padded ASCII diagnostic strings
 * (e.g. 0x4d61696e 0x43707542 spells "MainCpuB", 0x66617461
 * 0x6c457272 spells "fatalErr").  Opaque image data.
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
6614
/* TSO firmware .data: embedded version tag -- words 2-5 spell the
 * ASCII string "stkoffld_v1.6.0".  Opaque image data.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
6620
/* 5705 needs a special version of the TSO firmware.  */
/* TSO5 firmware v1.2.0 image layout: load addresses and byte lengths
 * of each section of the 5705-specific image below.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* sic: "RELASE" typo kept, other code may reference it */
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6636
/* Text (code) segment image of the 5705-specific TSO firmware, copied to
 * TG3_TSO5_FW_TEXT_ADDR and run on the RX CPU by tg3_load_tso_firmware().
 * Sized to (TG3_TSO5_FW_TEXT_LEN / 4) + 1 words; trailing zero words are
 * padding.  NOTE(review): the opcode patterns look like MIPS machine
 * code, matching the firmware copyright note at the top of this file —
 * not independently verified here.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6795
/* Read-only data segment image of the 5705 TSO firmware, copied to
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  The words are
 * ASCII string constants ("MainCpuB", "stkoffld", "fatalErr", ...).
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6802
/* Initialized data segment image of the 5705 TSO firmware, copied to
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  Contains the ASCII
 * version tag "stkoffld_v1.2.0".
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6807
6808 /* tp->lock is held. */
6809 static int tg3_load_tso_firmware(struct tg3 *tp)
6810 {
6811         struct fw_info info;
6812         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6813         int err, i;
6814
6815         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6816                 return 0;
6817
6818         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6819                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6820                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6821                 info.text_data = &tg3Tso5FwText[0];
6822                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6823                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6824                 info.rodata_data = &tg3Tso5FwRodata[0];
6825                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6826                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6827                 info.data_data = &tg3Tso5FwData[0];
6828                 cpu_base = RX_CPU_BASE;
6829                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6830                 cpu_scratch_size = (info.text_len +
6831                                     info.rodata_len +
6832                                     info.data_len +
6833                                     TG3_TSO5_FW_SBSS_LEN +
6834                                     TG3_TSO5_FW_BSS_LEN);
6835         } else {
6836                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6837                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6838                 info.text_data = &tg3TsoFwText[0];
6839                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6840                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6841                 info.rodata_data = &tg3TsoFwRodata[0];
6842                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6843                 info.data_len = TG3_TSO_FW_DATA_LEN;
6844                 info.data_data = &tg3TsoFwData[0];
6845                 cpu_base = TX_CPU_BASE;
6846                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6847                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6848         }
6849
6850         err = tg3_load_firmware_cpu(tp, cpu_base,
6851                                     cpu_scratch_base, cpu_scratch_size,
6852                                     &info);
6853         if (err)
6854                 return err;
6855
6856         /* Now startup the cpu. */
6857         tw32(cpu_base + CPU_STATE, 0xffffffff);
6858         tw32_f(cpu_base + CPU_PC,    info.text_base);
6859
6860         for (i = 0; i < 5; i++) {
6861                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6862                         break;
6863                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6864                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6865                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6866                 udelay(1000);
6867         }
6868         if (i >= 5) {
6869                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6870                        "to set CPU PC, is %08x should be %08x\n",
6871                        tp->dev->name, tr32(cpu_base + CPU_PC),
6872                        info.text_base);
6873                 return -ENODEV;
6874         }
6875         tw32(cpu_base + CPU_STATE, 0xffffffff);
6876         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6877         return 0;
6878 }
6879
6880
6881 /* tp->lock is held. */
6882 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6883 {
6884         u32 addr_high, addr_low;
6885         int i;
6886
6887         addr_high = ((tp->dev->dev_addr[0] << 8) |
6888                      tp->dev->dev_addr[1]);
6889         addr_low = ((tp->dev->dev_addr[2] << 24) |
6890                     (tp->dev->dev_addr[3] << 16) |
6891                     (tp->dev->dev_addr[4] <<  8) |
6892                     (tp->dev->dev_addr[5] <<  0));
6893         for (i = 0; i < 4; i++) {
6894                 if (i == 1 && skip_mac_1)
6895                         continue;
6896                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6897                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6898         }
6899
6900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6902                 for (i = 0; i < 12; i++) {
6903                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6904                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6905                 }
6906         }
6907
6908         addr_high = (tp->dev->dev_addr[0] +
6909                      tp->dev->dev_addr[1] +
6910                      tp->dev->dev_addr[2] +
6911                      tp->dev->dev_addr[3] +
6912                      tp->dev->dev_addr[4] +
6913                      tp->dev->dev_addr[5]) &
6914                 TX_BACKOFF_SEED_MASK;
6915         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6916 }
6917
6918 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6919 {
6920         struct tg3 *tp = netdev_priv(dev);
6921         struct sockaddr *addr = p;
6922         int err = 0, skip_mac_1 = 0;
6923
6924         if (!is_valid_ether_addr(addr->sa_data))
6925                 return -EINVAL;
6926
6927         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6928
6929         if (!netif_running(dev))
6930                 return 0;
6931
6932         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6933                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6934
6935                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6936                 addr0_low = tr32(MAC_ADDR_0_LOW);
6937                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6938                 addr1_low = tr32(MAC_ADDR_1_LOW);
6939
6940                 /* Skip MAC addr 1 if ASF is using it. */
6941                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6942                     !(addr1_high == 0 && addr1_low == 0))
6943                         skip_mac_1 = 1;
6944         }
6945         spin_lock_bh(&tp->lock);
6946         __tg3_set_mac_addr(tp, skip_mac_1);
6947         spin_unlock_bh(&tp->lock);
6948
6949         return err;
6950 }
6951
6952 /* tp->lock is held. */
6953 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6954                            dma_addr_t mapping, u32 maxlen_flags,
6955                            u32 nic_addr)
6956 {
6957         tg3_write_mem(tp,
6958                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6959                       ((u64) mapping >> 32));
6960         tg3_write_mem(tp,
6961                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6962                       ((u64) mapping & 0xffffffff));
6963         tg3_write_mem(tp,
6964                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6965                        maxlen_flags);
6966
6967         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6968                 tg3_write_mem(tp,
6969                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6970                               nic_addr);
6971 }
6972
6973 static void __tg3_set_rx_mode(struct net_device *);
6974 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6975 {
6976         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6977         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6978         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6979         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6980         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6981                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6982                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6983         }
6984         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6985         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6986         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6987                 u32 val = ec->stats_block_coalesce_usecs;
6988
6989                 if (!netif_carrier_ok(tp->dev))
6990                         val = 0;
6991
6992                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6993         }
6994 }
6995
6996 /* tp->lock is held. */
6997 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6998 {
6999         u32 val, rdmac_mode;
7000         int i, err, limit;
7001
7002         tg3_disable_ints(tp);
7003
7004         tg3_stop_fw(tp);
7005
7006         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7007
7008         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7009                 tg3_abort_hw(tp, 1);
7010         }
7011
7012         if (reset_phy &&
7013             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7014                 tg3_phy_reset(tp);
7015
7016         err = tg3_chip_reset(tp);
7017         if (err)
7018                 return err;
7019
7020         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7021
7022         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7023             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7024                 val = tr32(TG3_CPMU_CTRL);
7025                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7026                 tw32(TG3_CPMU_CTRL, val);
7027
7028                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7029                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7030                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7031                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7032
7033                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7034                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7035                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7036                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7037
7038                 val = tr32(TG3_CPMU_HST_ACC);
7039                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7040                 val |= CPMU_HST_ACC_MACCLK_6_25;
7041                 tw32(TG3_CPMU_HST_ACC, val);
7042         }
7043
7044         /* This works around an issue with Athlon chipsets on
7045          * B3 tigon3 silicon.  This bit has no effect on any
7046          * other revision.  But do not set this on PCI Express
7047          * chips and don't even touch the clocks if the CPMU is present.
7048          */
7049         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7050                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7051                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7052                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7053         }
7054
7055         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7056             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7057                 val = tr32(TG3PCI_PCISTATE);
7058                 val |= PCISTATE_RETRY_SAME_DMA;
7059                 tw32(TG3PCI_PCISTATE, val);
7060         }
7061
7062         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7063                 /* Allow reads and writes to the
7064                  * APE register and memory space.
7065                  */
7066                 val = tr32(TG3PCI_PCISTATE);
7067                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7068                        PCISTATE_ALLOW_APE_SHMEM_WR;
7069                 tw32(TG3PCI_PCISTATE, val);
7070         }
7071
7072         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7073                 /* Enable some hw fixes.  */
7074                 val = tr32(TG3PCI_MSI_DATA);
7075                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7076                 tw32(TG3PCI_MSI_DATA, val);
7077         }
7078
7079         /* Descriptor ring init may make accesses to the
7080          * NIC SRAM area to setup the TX descriptors, so we
7081          * can only do this after the hardware has been
7082          * successfully reset.
7083          */
7084         err = tg3_init_rings(tp);
7085         if (err)
7086                 return err;
7087
7088         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7089             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7090             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7091                 /* This value is determined during the probe time DMA
7092                  * engine test, tg3_test_dma.
7093                  */
7094                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7095         }
7096
7097         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7098                           GRC_MODE_4X_NIC_SEND_RINGS |
7099                           GRC_MODE_NO_TX_PHDR_CSUM |
7100                           GRC_MODE_NO_RX_PHDR_CSUM);
7101         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7102
7103         /* Pseudo-header checksum is done by hardware logic and not
7104          * the offload processers, so make the chip do the pseudo-
7105          * header checksums on receive.  For transmit it is more
7106          * convenient to do the pseudo-header checksum in software
7107          * as Linux does that on transmit for us in all cases.
7108          */
7109         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7110
7111         tw32(GRC_MODE,
7112              tp->grc_mode |
7113              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7114
7115         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7116         val = tr32(GRC_MISC_CFG);
7117         val &= ~0xff;
7118         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7119         tw32(GRC_MISC_CFG, val);
7120
7121         /* Initialize MBUF/DESC pool. */
7122         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7123                 /* Do nothing.  */
7124         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7125                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7126                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7127                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7128                 else
7129                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7130                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7131                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7132         }
7133         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7134                 int fw_len;
7135
7136                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7137                           TG3_TSO5_FW_RODATA_LEN +
7138                           TG3_TSO5_FW_DATA_LEN +
7139                           TG3_TSO5_FW_SBSS_LEN +
7140                           TG3_TSO5_FW_BSS_LEN);
7141                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7142                 tw32(BUFMGR_MB_POOL_ADDR,
7143                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7144                 tw32(BUFMGR_MB_POOL_SIZE,
7145                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7146         }
7147
7148         if (tp->dev->mtu <= ETH_DATA_LEN) {
7149                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7150                      tp->bufmgr_config.mbuf_read_dma_low_water);
7151                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7152                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7153                 tw32(BUFMGR_MB_HIGH_WATER,
7154                      tp->bufmgr_config.mbuf_high_water);
7155         } else {
7156                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7157                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7158                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7159                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7160                 tw32(BUFMGR_MB_HIGH_WATER,
7161                      tp->bufmgr_config.mbuf_high_water_jumbo);
7162         }
7163         tw32(BUFMGR_DMA_LOW_WATER,
7164              tp->bufmgr_config.dma_low_water);
7165         tw32(BUFMGR_DMA_HIGH_WATER,
7166              tp->bufmgr_config.dma_high_water);
7167
7168         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7169         for (i = 0; i < 2000; i++) {
7170                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7171                         break;
7172                 udelay(10);
7173         }
7174         if (i >= 2000) {
7175                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7176                        tp->dev->name);
7177                 return -ENODEV;
7178         }
7179
7180         /* Setup replenish threshold. */
7181         val = tp->rx_pending / 8;
7182         if (val == 0)
7183                 val = 1;
7184         else if (val > tp->rx_std_max_post)
7185                 val = tp->rx_std_max_post;
7186         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7187                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7188                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7189
7190                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7191                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7192         }
7193
7194         tw32(RCVBDI_STD_THRESH, val);
7195
7196         /* Initialize TG3_BDINFO's at:
7197          *  RCVDBDI_STD_BD:     standard eth size rx ring
7198          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7199          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7200          *
7201          * like so:
7202          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7203          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7204          *                              ring attribute flags
7205          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7206          *
7207          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7208          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7209          *
7210          * The size of each ring is fixed in the firmware, but the location is
7211          * configurable.
7212          */
7213         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7214              ((u64) tp->rx_std_mapping >> 32));
7215         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7216              ((u64) tp->rx_std_mapping & 0xffffffff));
7217         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7218              NIC_SRAM_RX_BUFFER_DESC);
7219
7220         /* Don't even try to program the JUMBO/MINI buffer descriptor
7221          * configs on 5705.
7222          */
7223         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7224                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7225                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7226         } else {
7227                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7228                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7229
7230                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7231                      BDINFO_FLAGS_DISABLED);
7232
7233                 /* Setup replenish threshold. */
7234                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7235
7236                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7237                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7238                              ((u64) tp->rx_jumbo_mapping >> 32));
7239                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7240                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7241                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7242                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7243                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7244                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7245                 } else {
7246                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7247                              BDINFO_FLAGS_DISABLED);
7248                 }
7249
7250         }
7251
7252         /* There is only one send ring on 5705/5750, no need to explicitly
7253          * disable the others.
7254          */
7255         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7256                 /* Clear out send RCB ring in SRAM. */
7257                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7258                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7259                                       BDINFO_FLAGS_DISABLED);
7260         }
7261
7262         tp->tx_prod = 0;
7263         tp->tx_cons = 0;
7264         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7265         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7266
7267         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7268                        tp->tx_desc_mapping,
7269                        (TG3_TX_RING_SIZE <<
7270                         BDINFO_FLAGS_MAXLEN_SHIFT),
7271                        NIC_SRAM_TX_BUFFER_DESC);
7272
7273         /* There is only one receive return ring on 5705/5750, no need
7274          * to explicitly disable the others.
7275          */
7276         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7277                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7278                      i += TG3_BDINFO_SIZE) {
7279                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7280                                       BDINFO_FLAGS_DISABLED);
7281                 }
7282         }
7283
7284         tp->rx_rcb_ptr = 0;
7285         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7286
7287         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7288                        tp->rx_rcb_mapping,
7289                        (TG3_RX_RCB_RING_SIZE(tp) <<
7290                         BDINFO_FLAGS_MAXLEN_SHIFT),
7291                        0);
7292
7293         tp->rx_std_ptr = tp->rx_pending;
7294         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7295                      tp->rx_std_ptr);
7296
7297         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7298                                                 tp->rx_jumbo_pending : 0;
7299         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7300                      tp->rx_jumbo_ptr);
7301
7302         /* Initialize MAC address and backoff seed. */
7303         __tg3_set_mac_addr(tp, 0);
7304
7305         /* MTU + ethernet header + FCS + optional VLAN tag */
7306         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7307
7308         /* The slot time is changed by tg3_setup_phy if we
7309          * run at gigabit with half duplex.
7310          */
7311         tw32(MAC_TX_LENGTHS,
7312              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7313              (6 << TX_LENGTHS_IPG_SHIFT) |
7314              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7315
7316         /* Receive rules. */
7317         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7318         tw32(RCVLPC_CONFIG, 0x0181);
7319
7320         /* Calculate RDMAC_MODE setting early, we need it to determine
7321          * the RCVLPC_STATE_ENABLE mask.
7322          */
7323         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7324                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7325                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7326                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7327                       RDMAC_MODE_LNGREAD_ENAB);
7328
7329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7331                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7332                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7333                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7334
7335         /* If statement applies to 5705 and 5750 PCI devices only */
7336         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7337              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7338             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7339                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7340                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7341                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7342                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7343                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7344                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7345                 }
7346         }
7347
7348         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7349                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7350
7351         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7352                 rdmac_mode |= (1 << 27);
7353
7354         /* Receive/send statistics. */
7355         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7356                 val = tr32(RCVLPC_STATS_ENABLE);
7357                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7358                 tw32(RCVLPC_STATS_ENABLE, val);
7359         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7360                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7361                 val = tr32(RCVLPC_STATS_ENABLE);
7362                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7363                 tw32(RCVLPC_STATS_ENABLE, val);
7364         } else {
7365                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7366         }
7367         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7368         tw32(SNDDATAI_STATSENAB, 0xffffff);
7369         tw32(SNDDATAI_STATSCTRL,
7370              (SNDDATAI_SCTRL_ENABLE |
7371               SNDDATAI_SCTRL_FASTUPD));
7372
7373         /* Setup host coalescing engine. */
7374         tw32(HOSTCC_MODE, 0);
7375         for (i = 0; i < 2000; i++) {
7376                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7377                         break;
7378                 udelay(10);
7379         }
7380
7381         __tg3_set_coalesce(tp, &tp->coal);
7382
7383         /* set status block DMA address */
7384         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7385              ((u64) tp->status_mapping >> 32));
7386         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7387              ((u64) tp->status_mapping & 0xffffffff));
7388
7389         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7390                 /* Status/statistics block address.  See tg3_timer,
7391                  * the tg3_periodic_fetch_stats call there, and
7392                  * tg3_get_stats to see how this works for 5705/5750 chips.
7393                  */
7394                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7395                      ((u64) tp->stats_mapping >> 32));
7396                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7397                      ((u64) tp->stats_mapping & 0xffffffff));
7398                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7399                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7400         }
7401
7402         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7403
7404         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7405         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7406         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7407                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7408
7409         /* Clear statistics/status block in chip, and status block in ram. */
7410         for (i = NIC_SRAM_STATS_BLK;
7411              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7412              i += sizeof(u32)) {
7413                 tg3_write_mem(tp, i, 0);
7414                 udelay(40);
7415         }
7416         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7417
7418         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7419                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7420                 /* reset to prevent losing 1st rx packet intermittently */
7421                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7422                 udelay(10);
7423         }
7424
7425         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7426                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7427         else
7428                 tp->mac_mode = 0;
7429         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7430                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7431         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7432             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7433             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7434                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7435         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7436         udelay(40);
7437
7438         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7439          * If TG3_FLG2_IS_NIC is zero, we should read the
7440          * register to preserve the GPIO settings for LOMs. The GPIOs,
7441          * whether used as inputs or outputs, are set by boot code after
7442          * reset.
7443          */
7444         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7445                 u32 gpio_mask;
7446
7447                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7448                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7449                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7450
7451                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7452                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7453                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7454
7455                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7456                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7457
7458                 tp->grc_local_ctrl &= ~gpio_mask;
7459                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7460
7461                 /* GPIO1 must be driven high for eeprom write protect */
7462                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7463                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7464                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7465         }
7466         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7467         udelay(100);
7468
7469         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7470         tp->last_tag = 0;
7471
7472         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7473                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7474                 udelay(40);
7475         }
7476
7477         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7478                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7479                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7480                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7481                WDMAC_MODE_LNGREAD_ENAB);
7482
7483         /* If statement applies to 5705 and 5750 PCI devices only */
7484         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7485              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7487                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7488                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7489                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7490                         /* nothing */
7491                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7492                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7493                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7494                         val |= WDMAC_MODE_RX_ACCEL;
7495                 }
7496         }
7497
7498         /* Enable host coalescing bug fix */
7499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7500             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7501             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7502             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7503             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7504                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7505
7506         tw32_f(WDMAC_MODE, val);
7507         udelay(40);
7508
7509         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7510                 u16 pcix_cmd;
7511
7512                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7513                                      &pcix_cmd);
7514                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7515                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7516                         pcix_cmd |= PCI_X_CMD_READ_2K;
7517                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7518                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7519                         pcix_cmd |= PCI_X_CMD_READ_2K;
7520                 }
7521                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7522                                       pcix_cmd);
7523         }
7524
7525         tw32_f(RDMAC_MODE, rdmac_mode);
7526         udelay(40);
7527
7528         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7529         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7530                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7531
7532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7533                 tw32(SNDDATAC_MODE,
7534                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7535         else
7536                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7537
7538         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7539         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7540         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7541         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7542         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7543                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7544         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7545         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7546
7547         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7548                 err = tg3_load_5701_a0_firmware_fix(tp);
7549                 if (err)
7550                         return err;
7551         }
7552
7553         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7554                 err = tg3_load_tso_firmware(tp);
7555                 if (err)
7556                         return err;
7557         }
7558
7559         tp->tx_mode = TX_MODE_ENABLE;
7560         tw32_f(MAC_TX_MODE, tp->tx_mode);
7561         udelay(100);
7562
7563         tp->rx_mode = RX_MODE_ENABLE;
7564         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7566             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7568                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7569
7570         tw32_f(MAC_RX_MODE, tp->rx_mode);
7571         udelay(10);
7572
7573         tw32(MAC_LED_CTRL, tp->led_ctrl);
7574
7575         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7576         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7577                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7578                 udelay(10);
7579         }
7580         tw32_f(MAC_RX_MODE, tp->rx_mode);
7581         udelay(10);
7582
7583         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7584                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7585                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7586                         /* Set drive transmission level to 1.2V  */
7587                         /* only if the signal pre-emphasis bit is not set  */
7588                         val = tr32(MAC_SERDES_CFG);
7589                         val &= 0xfffff000;
7590                         val |= 0x880;
7591                         tw32(MAC_SERDES_CFG, val);
7592                 }
7593                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7594                         tw32(MAC_SERDES_CFG, 0x616000);
7595         }
7596
7597         /* Prevent chip from dropping frames when flow control
7598          * is enabled.
7599          */
7600         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7601
7602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7603             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7604                 /* Use hardware link auto-negotiation */
7605                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7606         }
7607
7608         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7609             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7610                 u32 tmp;
7611
7612                 tmp = tr32(SERDES_RX_CTRL);
7613                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7614                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7615                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7616                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7617         }
7618
7619         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7620                 if (tp->link_config.phy_is_low_power) {
7621                         tp->link_config.phy_is_low_power = 0;
7622                         tp->link_config.speed = tp->link_config.orig_speed;
7623                         tp->link_config.duplex = tp->link_config.orig_duplex;
7624                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7625                 }
7626
7627                 err = tg3_setup_phy(tp, 0);
7628                 if (err)
7629                         return err;
7630
7631                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7632                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7633                         u32 tmp;
7634
7635                         /* Clear CRC stats. */
7636                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7637                                 tg3_writephy(tp, MII_TG3_TEST1,
7638                                              tmp | MII_TG3_TEST1_CRC_EN);
7639                                 tg3_readphy(tp, 0x14, &tmp);
7640                         }
7641                 }
7642         }
7643
7644         __tg3_set_rx_mode(tp->dev);
7645
7646         /* Initialize receive rules. */
7647         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7648         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7649         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7650         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7651
7652         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7653             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7654                 limit = 8;
7655         else
7656                 limit = 16;
7657         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7658                 limit -= 4;
7659         switch (limit) {
7660         case 16:
7661                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7662         case 15:
7663                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7664         case 14:
7665                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7666         case 13:
7667                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7668         case 12:
7669                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7670         case 11:
7671                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7672         case 10:
7673                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7674         case 9:
7675                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7676         case 8:
7677                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7678         case 7:
7679                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7680         case 6:
7681                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7682         case 5:
7683                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7684         case 4:
7685                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7686         case 3:
7687                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7688         case 2:
7689         case 1:
7690
7691         default:
7692                 break;
7693         }
7694
7695         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7696                 /* Write our heartbeat update interval to APE. */
7697                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7698                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7699
7700         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7701
7702         return 0;
7703 }
7704
7705 /* Called at device open time to get the chip ready for
7706  * packet processing.  Invoked with tp->lock held.
7707  */
7708 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7709 {
7710         tg3_switch_clocks(tp);
7711
7712         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7713
7714         return tg3_reset_hw(tp, reset_phy);
7715 }
7716
/* Fold the current value of the 32-bit hardware counter register REG
 * into the 64-bit software accumulator PSTAT (a {high, low} pair).
 * If the low word wraps during the add (the sum comes out smaller
 * than the addend), carry one into the high word.  REG is evaluated
 * exactly once; PSTAT is evaluated more than once, so it must be
 * side-effect free.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7723
7724 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7725 {
7726         struct tg3_hw_stats *sp = tp->hw_stats;
7727
7728         if (!netif_carrier_ok(tp->dev))
7729                 return;
7730
7731         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7732         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7733         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7734         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7735         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7736         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7737         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7738         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7739         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7740         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7741         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7742         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7743         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7744
7745         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7746         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7747         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7748         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7749         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7750         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7751         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7752         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7753         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7754         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7755         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7756         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7757         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7758         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7759
7760         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7761         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7762         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7763 }
7764
/* Periodic driver timer, re-armed every tp->timer_offset jiffies.
 * Responsibilities: work around lost interrupts in non-tagged status
 * mode (every tick), once-per-second link polling and statistics
 * fetching, and the ASF firmware heartbeat (every tp->asf_multiplier
 * ticks).  Takes tp->lock for everything except re-arming itself.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being synchronized (device quiescing); do no
	 * work this tick but keep the timer alive.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block says work is pending; force the
			 * chip to raise the interrupt again.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Nudge the coalescing engine into updating the
			 * status block immediately.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine unexpectedly disabled; queue
			 * a full reset from process context.  Must drop
			 * the lock before returning without re-arming
			 * here; reset_task restarts the timer.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Detect a PHY/link event either via the MI
			 * interrupt status bit or the link-state-changed
			 * bit, depending on configuration.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but link state changed? */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* Carrier down but a signal has appeared? */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Toggle the port mode bits off
					 * and back on before renegotiating.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7884
7885 static int tg3_request_irq(struct tg3 *tp)
7886 {
7887         irq_handler_t fn;
7888         unsigned long flags;
7889         struct net_device *dev = tp->dev;
7890
7891         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7892                 fn = tg3_msi;
7893                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7894                         fn = tg3_msi_1shot;
7895                 flags = IRQF_SAMPLE_RANDOM;
7896         } else {
7897                 fn = tg3_interrupt;
7898                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7899                         fn = tg3_interrupt_tagged;
7900                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7901         }
7902         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7903 }
7904
/* Verify that a device interrupt actually reaches the host.  The
 * regular handler is temporarily replaced with tg3_test_isr, the host
 * coalescing engine is forced to raise an interrupt immediately, and
 * the mailbox/PCI state is polled for up to ~50ms for evidence of
 * delivery.  The normal handler is reinstalled before returning.
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV when the
 * device is down, or the errno from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production ISR for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A nonzero interrupt mailbox or a masked INTx line is
		 * taken as evidence that the interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the regular handler regardless of the test outcome. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7958
7959 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7960  * successfully restored
7961  */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to prove when MSI is not in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the line as a shared INTx interrupt. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed the caller owns the teardown; drop our IRQ. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
8019
/* dev->open hook: power the chip to D0, allocate descriptor rings,
 * attach the IRQ (preferring MSI where supported), bring the hardware
 * up, validate MSI delivery, and start the periodic timer.  Every
 * failure path unwinds completely so the device is left closed.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Timer ticks once per second with tagged status, ten
		 * times per second otherwise.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat interval: every two seconds. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed and could not be recovered;
			 * undo everything done so far.
			 */
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
8155
#if 0
/* Debug-only helper (compiled out): dumps a broad snapshot of PCI
 * config space, MAC/DMA engine state, host coalescing addresses, SRAM
 * control blocks, status/statistics blocks, mailboxes and the first
 * few NIC-side TX/RX descriptors to the console.
 *
 * NOTE(review): `txd`/`rxd` below are built as `unsigned long` from
 * tp->regs; if this is ever re-enabled the arithmetic would need an
 * __iomem-correct cast -- confirm before use.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8383
8384 static struct net_device_stats *tg3_get_stats(struct net_device *);
8385 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8386
/* dev->stop hook: quiesce NAPI and the reset task, stop the periodic
 * timer, halt the chip, release the IRQ and DMA memory, snapshot the
 * statistics so they survive the down period, then power the chip to
 * D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Fold the final hardware counters into the "previous" totals;
	 * the hw_stats memory is about to be freed below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8430
8431 static inline unsigned long get_stat64(tg3_stat64_t *val)
8432 {
8433         unsigned long ret;
8434
8435 #if (BITS_PER_LONG == 32)
8436         ret = val->low;
8437 #else
8438         ret = ((u64)val->high << 32) | ((u64)val->low);
8439 #endif
8440         return ret;
8441 }
8442
8443 static inline u64 get_estat64(tg3_stat64_t *val)
8444 {
8445        return ((u64)val->high << 32) | ((u64)val->low);
8446 }
8447
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * parts the count lives in a PHY register rather than the MAC
 * statistics block, so it is fetched over MII and accumulated in
 * software; all other configurations use the hardware rx_fcs_errors
 * counter.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting, then read the counter
			 * from PHY register 0x14.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		/* Accumulated in software; the PHY counter is presumably
		 * clear-on-read -- TODO confirm against the PHY datasheet.
		 */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8473
/* Accumulate one 64-bit hardware counter on top of the snapshot taken
 * at the last close (estats_prev), so totals survive chip resets.
 * Expects `estats', `old_estats' and `hw_stats' in the caller's scope.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_estat64(&hw_stats->member)
8477
/* Compose the ethtool statistics: the totals saved at the last close
 * plus the live hardware statistics block.  When the device is closed
 * (hw_stats freed) the saved totals are returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* RX MAC counters */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* TX MAC counters */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement statistics */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator statistics */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing statistics */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8565
/* dev->get_stats hook: build net_device_stats from the totals saved
 * at the last close plus the live hardware statistics block.  When
 * the device is closed (hw_stats freed) the saved totals are returned
 * unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8625
8626 static inline u32 calc_crc(unsigned char *buf, int len)
8627 {
8628         u32 reg;
8629         u32 tmp;
8630         int j, k;
8631
8632         reg = 0xffffffff;
8633
8634         for (j = 0; j < len; j++) {
8635                 reg ^= buf[j];
8636
8637                 for (k = 0; k < 8; k++) {
8638                         tmp = reg & 0x01;
8639
8640                         reg >>= 1;
8641
8642                         if (tmp) {
8643                                 reg ^= 0xedb88320;
8644                         }
8645                 }
8646         }
8647
8648         return ~reg;
8649 }
8650
8651 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8652 {
8653         /* accept or reject all multicast frames */
8654         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8655         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8656         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8657         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8658 }
8659
/* Program the RX filtering mode (promiscuous, VLAN tag stripping,
 * multicast hash) from dev->flags and the multicast list.  Caller
 * must hold the full lock; see tg3_set_rx_mode().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The inverted low 7 CRC bits select one of the
			 * 128 hash bits: bits 6:5 pick the register,
			 * bits 4:0 pick the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the MAC register when the mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8723
/* net_device set_rx_mode hook: apply RX filtering under the full lock.
 * A no-op while the interface is down; the mode is reprogrammed when
 * the device is brought up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8735
8736 #define TG3_REGDUMP_LEN         (32 * 1024)
8737
/* ethtool get_regs_len hook: the dump written by tg3_get_regs() is
 * always a fixed 32K buffer.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8742
/* ethtool get_regs hook: dump the chip's register blocks into the
 * caller-supplied 32K buffer (_p), keeping each register at its own
 * offset within the dump.  Skipped entirely while the PHY is in
 * low-power state, when register reads would be unreliable.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

	/* Helper macros: GET_REG32_LOOP / GET_REG32_1 first re-seat the
	 * cursor 'p' at the register's own offset inside the dump
	 * buffer, then __GET_REG32 reads a register and advances p.
	 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only present/safe to read when the chip
	 * actually has NVRAM attached.
	 */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8815
/* ethtool get_eeprom_len hook: report the NVRAM size probed at init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8822
8823 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8824 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8825 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8826
/* ethtool get_eeprom hook: copy @eeprom->len bytes of NVRAM starting
 * at @eeprom->offset into @data.
 *
 * NVRAM is only readable in aligned 4-byte words, so the transfer is
 * split into an unaligned head, a run of whole words, and an unaligned
 * tail.  eeprom->len is advanced incrementally so a partial byte count
 * is reported if a read fails midway.  Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is inaccessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8886
8887 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8888
/* ethtool set_eeprom hook: write @eeprom->len bytes from @data into
 * NVRAM at @eeprom->offset.
 *
 * NVRAM writes must be whole, aligned 4-byte words, so the range is
 * widened to word boundaries: the existing boundary words are read
 * back first ('start'/'end') and merged with the user data in a
 * temporary buffer.  Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is inaccessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Refuse data that was not prepared for this device. */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved boundary word(s) with user data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8946
8947 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8948 {
8949         struct tg3 *tp = netdev_priv(dev);
8950
8951         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8952                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8953                         return -EAGAIN;
8954                 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8955         }
8956
8957         cmd->supported = (SUPPORTED_Autoneg);
8958
8959         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8960                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8961                                    SUPPORTED_1000baseT_Full);
8962
8963         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8964                 cmd->supported |= (SUPPORTED_100baseT_Half |
8965                                   SUPPORTED_100baseT_Full |
8966                                   SUPPORTED_10baseT_Half |
8967                                   SUPPORTED_10baseT_Full |
8968                                   SUPPORTED_TP);
8969                 cmd->port = PORT_TP;
8970         } else {
8971                 cmd->supported |= SUPPORTED_FIBRE;
8972                 cmd->port = PORT_FIBRE;
8973         }
8974
8975         cmd->advertising = tp->link_config.advertising;
8976         if (netif_running(dev)) {
8977                 cmd->speed = tp->link_config.active_speed;
8978                 cmd->duplex = tp->link_config.active_duplex;
8979         }
8980         cmd->phy_address = PHY_ADDR;
8981         cmd->transceiver = 0;
8982         cmd->autoneg = tp->link_config.autoneg;
8983         cmd->maxtxpkt = 0;
8984         cmd->maxrxpkt = 0;
8985         return 0;
8986 }
8987
/* ethtool set_settings hook: validate and apply link settings.
 *
 * With phylib attached the request is delegated to the PHY layer.
 * Otherwise the requested advertisement/forced speed is validated
 * against the hardware (SerDes parts are 1000Mb-only; copper cannot
 * force 1000; 10/100-only parts reject 1000), link_config is updated
 * under the full lock, and the PHY is re-set-up if the device is up.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg: speed/duplex are resolved by negotiation. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration across power events. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9043
9044 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9045 {
9046         struct tg3 *tp = netdev_priv(dev);
9047
9048         strcpy(info->driver, DRV_MODULE_NAME);
9049         strcpy(info->version, DRV_MODULE_VERSION);
9050         strcpy(info->fw_version, tp->fw_ver);
9051         strcpy(info->bus_info, pci_name(tp->pdev));
9052 }
9053
9054 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9055 {
9056         struct tg3 *tp = netdev_priv(dev);
9057
9058         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9059             device_can_wakeup(&tp->pdev->dev))
9060                 wol->supported = WAKE_MAGIC;
9061         else
9062                 wol->supported = 0;
9063         wol->wolopts = 0;
9064         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9065                 wol->wolopts = WAKE_MAGIC;
9066         memset(&wol->sopass, 0, sizeof(wol->sopass));
9067 }
9068
9069 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9070 {
9071         struct tg3 *tp = netdev_priv(dev);
9072         struct device *dp = &tp->pdev->dev;
9073
9074         if (wol->wolopts & ~WAKE_MAGIC)
9075                 return -EINVAL;
9076         if ((wol->wolopts & WAKE_MAGIC) &&
9077             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9078                 return -EINVAL;
9079
9080         spin_lock_bh(&tp->lock);
9081         if (wol->wolopts & WAKE_MAGIC) {
9082                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9083                 device_set_wakeup_enable(dp, true);
9084         } else {
9085                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9086                 device_set_wakeup_enable(dp, false);
9087         }
9088         spin_unlock_bh(&tp->lock);
9089
9090         return 0;
9091 }
9092
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
9098
/* ethtool set_msglevel hook: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
9104
/* ethtool set_tso hook: enable or disable TCP segmentation offload.
 *
 * Non-TSO-capable chips only accept "off".  HW_TSO_2 chips (other
 * than the 5906) additionally gain TSO over IPv6, and the 5761,
 * non-AX 5784 and 5785 also get ECN-friendly TSO.  The base IPv4 TSO
 * flag itself is toggled by ethtool_op_set_tso().
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			/* Disabling TSO drops the extended variants too. */
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9128
/* ethtool nway_reset hook: restart link autonegotiation.
 *
 * Not meaningful on SerDes PHYs.  With phylib attached, delegates to
 * phy_start_aneg(); otherwise restarts autoneg via MII_BMCR directly.
 * Returns 0 on success, -EAGAIN if the interface/PHY is not ready,
 * -EINVAL if autoneg cannot be restarted.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and the first result is
		 * discarded -- presumably to flush a stale/latched value.
		 * Confirm before collapsing this into a single read.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			/* Autoneg already on (or parallel-detect active):
			 * kick off a fresh negotiation.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9162
9163 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9164 {
9165         struct tg3 *tp = netdev_priv(dev);
9166
9167         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9168         ering->rx_mini_max_pending = 0;
9169         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9170                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9171         else
9172                 ering->rx_jumbo_max_pending = 0;
9173
9174         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9175
9176         ering->rx_pending = tp->rx_pending;
9177         ering->rx_mini_pending = 0;
9178         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9179                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9180         else
9181                 ering->rx_jumbo_pending = 0;
9182
9183         ering->tx_pending = tp->tx_pending;
9184 }
9185
/* ethtool set_ringparam hook: resize the RX/jumbo/TX rings.
 *
 * Validates the requested counts against the hardware ring sizes
 * (TX must leave room for a maximally-fragmented skb; three times
 * that on TSO_BUG chips), then stops the device, applies the new
 * sizes and restarts the hardware.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we actually stopped it above. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9229
9230 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9231 {
9232         struct tg3 *tp = netdev_priv(dev);
9233
9234         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9235
9236         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9237                 epause->rx_pause = 1;
9238         else
9239                 epause->rx_pause = 0;
9240
9241         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9242                 epause->tx_pause = 1;
9243         else
9244                 epause->tx_pause = 0;
9245 }
9246
/* ethtool set_pauseparam hook: configure RX/TX flow control.
 *
 * With phylib attached: in autoneg mode the pause request is folded
 * into the PHY's advertisement mask (restarting negotiation only if
 * the pause bits changed); in forced mode the flowctrl flags are
 * applied directly.  Without phylib the device is stopped, the
 * PAUSE_AUTONEG flag and flowctrl bits are updated under the full
 * lock, and the hardware is restarted.  Returns 0 or negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus.phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto the standard
			 * Pause/Asym_Pause advertisement encoding.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only restart autoneg if the pause bits
				 * actually changed.
				 */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Apply by fully resetting and re-initializing. */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9339
/* ethtool get_rx_csum hook: report whether RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
9345
9346 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9347 {
9348         struct tg3 *tp = netdev_priv(dev);
9349
9350         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9351                 if (data != 0)
9352                         return -EINVAL;
9353                 return 0;
9354         }
9355
9356         spin_lock_bh(&tp->lock);
9357         if (data)
9358                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9359         else
9360                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9361         spin_unlock_bh(&tp->lock);
9362
9363         return 0;
9364 }
9365
9366 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9367 {
9368         struct tg3 *tp = netdev_priv(dev);
9369
9370         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9371                 if (data != 0)
9372                         return -EINVAL;
9373                 return 0;
9374         }
9375
9376         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9377             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9380             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9381                 ethtool_op_set_tx_ipv6_csum(dev, data);
9382         else
9383                 ethtool_op_set_tx_csum(dev, data);
9384
9385         return 0;
9386 }
9387
9388 static int tg3_get_sset_count (struct net_device *dev, int sset)
9389 {
9390         switch (sset) {
9391         case ETH_SS_TEST:
9392                 return TG3_NUM_TEST;
9393         case ETH_SS_STATS:
9394                 return TG3_NUM_STATS;
9395         default:
9396                 return -EOPNOTSUPP;
9397         }
9398 }
9399
9400 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9401 {
9402         switch (stringset) {
9403         case ETH_SS_STATS:
9404                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9405                 break;
9406         case ETH_SS_TEST:
9407                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9408                 break;
9409         default:
9410                 WARN_ON(1);     /* we need a WARN() */
9411                 break;
9412         }
9413 }
9414
/* ethtool phys_id hook: blink the port LEDs so the adapter can be
 * located physically.  @data is the duration in seconds; 0 means
 * "indefinitely" (approximated as UINT_MAX / 2).  Alternates all-on
 * and all-off via the LED override bits every 500ms, then restores
 * the saved LED configuration.  Interruptible by signals.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	/* Two half-second phases per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore the normal LED behavior. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
9446
/* ethtool get_ethtool_stats hook: refresh (via tg3_get_estats()) and
 * copy out the driver's statistics block; keys are in
 * ethtool_stats_keys (see tg3_get_strings()).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9453
9454 #define NVRAM_TEST_SIZE 0x100
9455 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9456 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9457 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9458 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9459 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9460
9461 static int tg3_test_nvram(struct tg3 *tp)
9462 {
9463         u32 csum, magic;
9464         __le32 *buf;
9465         int i, j, k, err = 0, size;
9466
9467         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9468                 return -EIO;
9469
9470         if (magic == TG3_EEPROM_MAGIC)
9471                 size = NVRAM_TEST_SIZE;
9472         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9473                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9474                     TG3_EEPROM_SB_FORMAT_1) {
9475                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9476                         case TG3_EEPROM_SB_REVISION_0:
9477                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9478                                 break;
9479                         case TG3_EEPROM_SB_REVISION_2:
9480                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9481                                 break;
9482                         case TG3_EEPROM_SB_REVISION_3:
9483                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9484                                 break;
9485                         default:
9486                                 return 0;
9487                         }
9488                 } else
9489                         return 0;
9490         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9491                 size = NVRAM_SELFBOOT_HW_SIZE;
9492         else
9493                 return -EIO;
9494
9495         buf = kmalloc(size, GFP_KERNEL);
9496         if (buf == NULL)
9497                 return -ENOMEM;
9498
9499         err = -EIO;
9500         for (i = 0, j = 0; i < size; i += 4, j++) {
9501                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9502                         break;
9503         }
9504         if (i < size)
9505                 goto out;
9506
9507         /* Selfboot format */
9508         magic = swab32(le32_to_cpu(buf[0]));
9509         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9510             TG3_EEPROM_MAGIC_FW) {
9511                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9512
9513                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9514                     TG3_EEPROM_SB_REVISION_2) {
9515                         /* For rev 2, the csum doesn't include the MBA. */
9516                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9517                                 csum8 += buf8[i];
9518                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9519                                 csum8 += buf8[i];
9520                 } else {
9521                         for (i = 0; i < size; i++)
9522                                 csum8 += buf8[i];
9523                 }
9524
9525                 if (csum8 == 0) {
9526                         err = 0;
9527                         goto out;
9528                 }
9529
9530                 err = -EIO;
9531                 goto out;
9532         }
9533
9534         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9535             TG3_EEPROM_MAGIC_HW) {
9536                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9537                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9538                 u8 *buf8 = (u8 *) buf;
9539
9540                 /* Separate the parity bits and the data bytes.  */
9541                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9542                         if ((i == 0) || (i == 8)) {
9543                                 int l;
9544                                 u8 msk;
9545
9546                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9547                                         parity[k++] = buf8[i] & msk;
9548                                 i++;
9549                         }
9550                         else if (i == 16) {
9551                                 int l;
9552                                 u8 msk;
9553
9554                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9555                                         parity[k++] = buf8[i] & msk;
9556                                 i++;
9557
9558                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9559                                         parity[k++] = buf8[i] & msk;
9560                                 i++;
9561                         }
9562                         data[j++] = buf8[i];
9563                 }
9564
9565                 err = -EIO;
9566                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9567                         u8 hw8 = hweight8(data[i]);
9568
9569                         if ((hw8 & 0x1) && parity[i])
9570                                 goto out;
9571                         else if (!(hw8 & 0x1) && !parity[i])
9572                                 goto out;
9573                 }
9574                 err = 0;
9575                 goto out;
9576         }
9577
9578         /* Bootstrap checksum at offset 0x10 */
9579         csum = calc_crc((unsigned char *) buf, 0x10);
9580         if(csum != le32_to_cpu(buf[0x10/4]))
9581                 goto out;
9582
9583         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9584         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9585         if (csum != le32_to_cpu(buf[0xfc/4]))
9586                  goto out;
9587
9588         err = 0;
9589
9590 out:
9591         kfree(buf);
9592         return err;
9593 }
9594
9595 #define TG3_SERDES_TIMEOUT_SEC  2
9596 #define TG3_COPPER_TIMEOUT_SEC  6
9597
9598 static int tg3_test_link(struct tg3 *tp)
9599 {
9600         int i, max;
9601
9602         if (!netif_running(tp->dev))
9603                 return -ENODEV;
9604
9605         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9606                 max = TG3_SERDES_TIMEOUT_SEC;
9607         else
9608                 max = TG3_COPPER_TIMEOUT_SEC;
9609
9610         for (i = 0; i < max; i++) {
9611                 if (netif_carrier_ok(tp->dev))
9612                         return 0;
9613
9614                 if (msleep_interruptible(1000))
9615                         break;
9616         }
9617
9618         return -EIO;
9619 }
9620
/* Only test the commonly used registers.
 *
 * Each reg_tbl entry describes one register: which chip families it
 * applies to (flags), which bits are read-only (read_mask) and which
 * are read/write (write_mask).  For every applicable entry the test
 * writes all-zeros and then all-ones, verifying after each write that
 * the read-only bits kept their saved value and the read/write bits
 * took the written value.  The original register content is restored
 * on every exit path.  Returns 0 on success, -EIO on the first
 * mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry-applicability flags: which chip families to include/exclude. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	/* Walk the table, skipping entries that don't apply to this chip. */
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the original content before the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	/* Restore the register that failed before bailing out. */
	tw32(offset, save_val);
	return -EIO;
}
9841
9842 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9843 {
9844         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9845         int i;
9846         u32 j;
9847
9848         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9849                 for (j = 0; j < len; j += 4) {
9850                         u32 val;
9851
9852                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9853                         tg3_read_mem(tp, offset + j, &val);
9854                         if (val != test_pattern[i])
9855                                 return -EIO;
9856                 }
9857         }
9858         return 0;
9859 }
9860
9861 static int tg3_test_memory(struct tg3 *tp)
9862 {
9863         static struct mem_entry {
9864                 u32 offset;
9865                 u32 len;
9866         } mem_tbl_570x[] = {
9867                 { 0x00000000, 0x00b50},
9868                 { 0x00002000, 0x1c000},
9869                 { 0xffffffff, 0x00000}
9870         }, mem_tbl_5705[] = {
9871                 { 0x00000100, 0x0000c},
9872                 { 0x00000200, 0x00008},
9873                 { 0x00004000, 0x00800},
9874                 { 0x00006000, 0x01000},
9875                 { 0x00008000, 0x02000},
9876                 { 0x00010000, 0x0e000},
9877                 { 0xffffffff, 0x00000}
9878         }, mem_tbl_5755[] = {
9879                 { 0x00000200, 0x00008},
9880                 { 0x00004000, 0x00800},
9881                 { 0x00006000, 0x00800},
9882                 { 0x00008000, 0x02000},
9883                 { 0x00010000, 0x0c000},
9884                 { 0xffffffff, 0x00000}
9885         }, mem_tbl_5906[] = {
9886                 { 0x00000200, 0x00008},
9887                 { 0x00004000, 0x00400},
9888                 { 0x00006000, 0x00400},
9889                 { 0x00008000, 0x01000},
9890                 { 0x00010000, 0x01000},
9891                 { 0xffffffff, 0x00000}
9892         };
9893         struct mem_entry *mem_tbl;
9894         int err = 0;
9895         int i;
9896
9897         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9898                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9899                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9900                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9901                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9902                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9903                         mem_tbl = mem_tbl_5755;
9904                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9905                         mem_tbl = mem_tbl_5906;
9906                 else
9907                         mem_tbl = mem_tbl_5705;
9908         } else
9909                 mem_tbl = mem_tbl_570x;
9910
9911         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9912                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9913                     mem_tbl[i].len)) != 0)
9914                         break;
9915         }
9916
9917         return err;
9918 }
9919
9920 #define TG3_MAC_LOOPBACK        0
9921 #define TG3_PHY_LOOPBACK        1
9922
/* Transmit one self-addressed test frame with the MAC or the PHY in
 * internal loopback and verify that it arrives intact on the standard
 * RX ring.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK.
 *
 * Returns 0 on success, -ENOMEM if the skb cannot be allocated,
 * -EINVAL for an unknown mode, -EIO if the frame was not transmitted,
 * not received, or came back corrupted.  On success the received skb
 * is left on the ring (tg3_free_rings will unmap and free it later).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop frames back inside the MAC itself. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* Open the shadow register window, clear bit
				 * 0x20 in register 0x1b, then restore the
				 * window.  NOTE(review): presumably disables
				 * a PHY feature that interferes with
				 * loopback on 5906 -- confirm against the
				 * PHY datasheet.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			/* 5906 is a 10/100 part; loop back at 100 Mbps. */
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a 1514-byte frame addressed to ourselves with a
	 * recognizable byte pattern in the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer is so we can tell when our
	 * frame arrives.
	 */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the send mailbox, then read it back to flush the write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been both transmitted and received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length excludes the 4-byte FCS. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
10090
10091 #define TG3_MAC_LOOPBACK_FAILED         1
10092 #define TG3_PHY_LOOPBACK_FAILED         2
10093 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10094                                          TG3_PHY_LOOPBACK_FAILED)
10095
/* Run the MAC and (when applicable) PHY loopback tests after a full
 * hardware reset.  On 5784/5761/5785 the CPMU mutex is taken and
 * link-based power management disabled around the runs, since those
 * features would interfere with loopback.
 *
 * Returns a bitmask of TG3_MAC_LOOPBACK_FAILED and/or
 * TG3_PHY_LOOPBACK_FAILED; 0 means both tests passed.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* Skip PHY loopback on serdes parts and when phylib owns the PHY. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
10154
/* ethtool self_test entry point.  Fills data[] with per-test results
 * (non-zero = failed): [0] NVRAM, [1] link, [2] registers, [3] memory,
 * [4] loopback bitmask, [5] interrupt.  Offline tests halt the chip,
 * run with tg3_full_lock held, and restart the hardware afterwards.
 * A device in low-power state is temporarily brought to D0.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* NVRAM and link tests are safe to run online. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs before poking at
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs interrupts, so drop the lock. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset again and bring the interface back up. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10232
10233 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10234 {
10235         struct mii_ioctl_data *data = if_mii(ifr);
10236         struct tg3 *tp = netdev_priv(dev);
10237         int err;
10238
10239         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10240                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10241                         return -EAGAIN;
10242                 return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
10243         }
10244
10245         switch(cmd) {
10246         case SIOCGMIIPHY:
10247                 data->phy_id = PHY_ADDR;
10248
10249                 /* fallthru */
10250         case SIOCGMIIREG: {
10251                 u32 mii_regval;
10252
10253                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10254                         break;                  /* We have no PHY */
10255
10256                 if (tp->link_config.phy_is_low_power)
10257                         return -EAGAIN;
10258
10259                 spin_lock_bh(&tp->lock);
10260                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10261                 spin_unlock_bh(&tp->lock);
10262
10263                 data->val_out = mii_regval;
10264
10265                 return err;
10266         }
10267
10268         case SIOCSMIIREG:
10269                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10270                         break;                  /* We have no PHY */
10271
10272                 if (!capable(CAP_NET_ADMIN))
10273                         return -EPERM;
10274
10275                 if (tp->link_config.phy_is_low_power)
10276                         return -EAGAIN;
10277
10278                 spin_lock_bh(&tp->lock);
10279                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10280                 spin_unlock_bh(&tp->lock);
10281
10282                 return err;
10283
10284         default:
10285                 /* do nothing */
10286                 break;
10287         }
10288         return -EOPNOTSUPP;
10289 }
10290
10291 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new VLAN group and refresh the
 * RX mode so the chip keeps/strips VLAN tags accordingly.  The device
 * is quiesced around the update when it is running.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
10311 #endif
10312
10313 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10314 {
10315         struct tg3 *tp = netdev_priv(dev);
10316
10317         memcpy(ec, &tp->coal, sizeof(*ec));
10318         return 0;
10319 }
10320
10321 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10322 {
10323         struct tg3 *tp = netdev_priv(dev);
10324         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10325         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10326
10327         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10328                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10329                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10330                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10331                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10332         }
10333
10334         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10335             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10336             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10337             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10338             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10339             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10340             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10341             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10342             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10343             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10344                 return -EINVAL;
10345
10346         /* No rx interrupts will be generated if both are zero */
10347         if ((ec->rx_coalesce_usecs == 0) &&
10348             (ec->rx_max_coalesced_frames == 0))
10349                 return -EINVAL;
10350
10351         /* No tx interrupts will be generated if both are zero */
10352         if ((ec->tx_coalesce_usecs == 0) &&
10353             (ec->tx_max_coalesced_frames == 0))
10354                 return -EINVAL;
10355
10356         /* Only copy relevant parameters, ignore all others. */
10357         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10358         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10359         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10360         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10361         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10362         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10363         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10364         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10365         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10366
10367         if (netif_running(dev)) {
10368                 tg3_full_lock(tp, 0);
10369                 __tg3_set_coalesce(tp, &tp->coal);
10370                 tg3_full_unlock(tp);
10371         }
10372         return 0;
10373 }
10374
/* ethtool operations exported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10407
10408 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10409 {
10410         u32 cursize, val, magic;
10411
10412         tp->nvram_size = EEPROM_CHIP_SIZE;
10413
10414         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10415                 return;
10416
10417         if ((magic != TG3_EEPROM_MAGIC) &&
10418             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10419             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10420                 return;
10421
10422         /*
10423          * Size the chip by reading offsets at increasing powers of two.
10424          * When we encounter our validation signature, we know the addressing
10425          * has wrapped around, and thus have our chip size.
10426          */
10427         cursize = 0x10;
10428
10429         while (cursize < tp->nvram_size) {
10430                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10431                         return;
10432
10433                 if (val == magic)
10434                         break;
10435
10436                 cursize <<= 1;
10437         }
10438
10439         tp->nvram_size = cursize;
10440 }
10441
10442 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10443 {
10444         u32 val;
10445
10446         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10447                 return;
10448
10449         /* Selfboot format */
10450         if (val != TG3_EEPROM_MAGIC) {
10451                 tg3_get_eeprom_size(tp);
10452                 return;
10453         }
10454
10455         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10456                 if (val != 0) {
10457                         tp->nvram_size = (val >> 16) * 1024;
10458                         return;
10459                 }
10460         }
10461         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10462 }
10463
10464 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10465 {
10466         u32 nvcfg1;
10467
10468         nvcfg1 = tr32(NVRAM_CFG1);
10469         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10470                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10471         }
10472         else {
10473                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10474                 tw32(NVRAM_CFG1, nvcfg1);
10475         }
10476
10477         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10478             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10479                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10480                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10481                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10482                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10483                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10484                                 break;
10485                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10486                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10487                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10488                                 break;
10489                         case FLASH_VENDOR_ATMEL_EEPROM:
10490                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10491                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10492                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10493                                 break;
10494                         case FLASH_VENDOR_ST:
10495                                 tp->nvram_jedecnum = JEDEC_ST;
10496                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10497                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10498                                 break;
10499                         case FLASH_VENDOR_SAIFUN:
10500                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10501                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10502                                 break;
10503                         case FLASH_VENDOR_SST_SMALL:
10504                         case FLASH_VENDOR_SST_LARGE:
10505                                 tp->nvram_jedecnum = JEDEC_SST;
10506                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10507                                 break;
10508                 }
10509         }
10510         else {
10511                 tp->nvram_jedecnum = JEDEC_ATMEL;
10512                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10513                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10514         }
10515 }
10516
10517 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10518 {
10519         u32 nvcfg1;
10520
10521         nvcfg1 = tr32(NVRAM_CFG1);
10522
10523         /* NVRAM protection for TPM */
10524         if (nvcfg1 & (1 << 27))
10525                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10526
10527         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10528                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10529                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10530                         tp->nvram_jedecnum = JEDEC_ATMEL;
10531                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10532                         break;
10533                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10534                         tp->nvram_jedecnum = JEDEC_ATMEL;
10535                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10536                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10537                         break;
10538                 case FLASH_5752VENDOR_ST_M45PE10:
10539                 case FLASH_5752VENDOR_ST_M45PE20:
10540                 case FLASH_5752VENDOR_ST_M45PE40:
10541                         tp->nvram_jedecnum = JEDEC_ST;
10542                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10543                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10544                         break;
10545         }
10546
10547         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10548                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10549                         case FLASH_5752PAGE_SIZE_256:
10550                                 tp->nvram_pagesize = 256;
10551                                 break;
10552                         case FLASH_5752PAGE_SIZE_512:
10553                                 tp->nvram_pagesize = 512;
10554                                 break;
10555                         case FLASH_5752PAGE_SIZE_1K:
10556                                 tp->nvram_pagesize = 1024;
10557                                 break;
10558                         case FLASH_5752PAGE_SIZE_2K:
10559                                 tp->nvram_pagesize = 2048;
10560                                 break;
10561                         case FLASH_5752PAGE_SIZE_4K:
10562                                 tp->nvram_pagesize = 4096;
10563                                 break;
10564                         case FLASH_5752PAGE_SIZE_264:
10565                                 tp->nvram_pagesize = 264;
10566                                 break;
10567                 }
10568         }
10569         else {
10570                 /* For eeprom, set pagesize to maximum eeprom size */
10571                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10572
10573                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10574                 tw32(NVRAM_CFG1, nvcfg1);
10575         }
10576 }
10577
10578 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10579 {
10580         u32 nvcfg1, protect = 0;
10581
10582         nvcfg1 = tr32(NVRAM_CFG1);
10583
10584         /* NVRAM protection for TPM */
10585         if (nvcfg1 & (1 << 27)) {
10586                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10587                 protect = 1;
10588         }
10589
10590         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10591         switch (nvcfg1) {
10592                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10593                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10594                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10595                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10596                         tp->nvram_jedecnum = JEDEC_ATMEL;
10597                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10598                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10599                         tp->nvram_pagesize = 264;
10600                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10601                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10602                                 tp->nvram_size = (protect ? 0x3e200 :
10603                                                   TG3_NVRAM_SIZE_512KB);
10604                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10605                                 tp->nvram_size = (protect ? 0x1f200 :
10606                                                   TG3_NVRAM_SIZE_256KB);
10607                         else
10608                                 tp->nvram_size = (protect ? 0x1f200 :
10609                                                   TG3_NVRAM_SIZE_128KB);
10610                         break;
10611                 case FLASH_5752VENDOR_ST_M45PE10:
10612                 case FLASH_5752VENDOR_ST_M45PE20:
10613                 case FLASH_5752VENDOR_ST_M45PE40:
10614                         tp->nvram_jedecnum = JEDEC_ST;
10615                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10616                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10617                         tp->nvram_pagesize = 256;
10618                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10619                                 tp->nvram_size = (protect ?
10620                                                   TG3_NVRAM_SIZE_64KB :
10621                                                   TG3_NVRAM_SIZE_128KB);
10622                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10623                                 tp->nvram_size = (protect ?
10624                                                   TG3_NVRAM_SIZE_64KB :
10625                                                   TG3_NVRAM_SIZE_256KB);
10626                         else
10627                                 tp->nvram_size = (protect ?
10628                                                   TG3_NVRAM_SIZE_128KB :
10629                                                   TG3_NVRAM_SIZE_512KB);
10630                         break;
10631         }
10632 }
10633
10634 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10635 {
10636         u32 nvcfg1;
10637
10638         nvcfg1 = tr32(NVRAM_CFG1);
10639
10640         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10641                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10642                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10643                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10644                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10645                         tp->nvram_jedecnum = JEDEC_ATMEL;
10646                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10647                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10648
10649                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10650                         tw32(NVRAM_CFG1, nvcfg1);
10651                         break;
10652                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10653                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10654                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10655                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10656                         tp->nvram_jedecnum = JEDEC_ATMEL;
10657                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10658                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10659                         tp->nvram_pagesize = 264;
10660                         break;
10661                 case FLASH_5752VENDOR_ST_M45PE10:
10662                 case FLASH_5752VENDOR_ST_M45PE20:
10663                 case FLASH_5752VENDOR_ST_M45PE40:
10664                         tp->nvram_jedecnum = JEDEC_ST;
10665                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10666                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10667                         tp->nvram_pagesize = 256;
10668                         break;
10669         }
10670 }
10671
10672 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10673 {
10674         u32 nvcfg1, protect = 0;
10675
10676         nvcfg1 = tr32(NVRAM_CFG1);
10677
10678         /* NVRAM protection for TPM */
10679         if (nvcfg1 & (1 << 27)) {
10680                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10681                 protect = 1;
10682         }
10683
10684         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10685         switch (nvcfg1) {
10686                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10687                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10688                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10689                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10690                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10691                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10692                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10693                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10694                         tp->nvram_jedecnum = JEDEC_ATMEL;
10695                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10696                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10697                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10698                         tp->nvram_pagesize = 256;
10699                         break;
10700                 case FLASH_5761VENDOR_ST_A_M45PE20:
10701                 case FLASH_5761VENDOR_ST_A_M45PE40:
10702                 case FLASH_5761VENDOR_ST_A_M45PE80:
10703                 case FLASH_5761VENDOR_ST_A_M45PE16:
10704                 case FLASH_5761VENDOR_ST_M_M45PE20:
10705                 case FLASH_5761VENDOR_ST_M_M45PE40:
10706                 case FLASH_5761VENDOR_ST_M_M45PE80:
10707                 case FLASH_5761VENDOR_ST_M_M45PE16:
10708                         tp->nvram_jedecnum = JEDEC_ST;
10709                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10710                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10711                         tp->nvram_pagesize = 256;
10712                         break;
10713         }
10714
10715         if (protect) {
10716                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10717         } else {
10718                 switch (nvcfg1) {
10719                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10720                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10721                         case FLASH_5761VENDOR_ST_A_M45PE16:
10722                         case FLASH_5761VENDOR_ST_M_M45PE16:
10723                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10724                                 break;
10725                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10726                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10727                         case FLASH_5761VENDOR_ST_A_M45PE80:
10728                         case FLASH_5761VENDOR_ST_M_M45PE80:
10729                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10730                                 break;
10731                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10732                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10733                         case FLASH_5761VENDOR_ST_A_M45PE40:
10734                         case FLASH_5761VENDOR_ST_M_M45PE40:
10735                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10736                                 break;
10737                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10738                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10739                         case FLASH_5761VENDOR_ST_A_M45PE20:
10740                         case FLASH_5761VENDOR_ST_M_M45PE20:
10741                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10742                                 break;
10743                 }
10744         }
10745 }
10746
10747 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10748 {
10749         tp->nvram_jedecnum = JEDEC_ATMEL;
10750         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10751         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10752 }
10753
10754 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10755 static void __devinit tg3_nvram_init(struct tg3 *tp)
10756 {
10757         tw32_f(GRC_EEPROM_ADDR,
10758              (EEPROM_ADDR_FSM_RESET |
10759               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10760                EEPROM_ADDR_CLKPERD_SHIFT)));
10761
10762         msleep(1);
10763
10764         /* Enable seeprom accesses. */
10765         tw32_f(GRC_LOCAL_CTRL,
10766              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10767         udelay(100);
10768
10769         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10770             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10771                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10772
10773                 if (tg3_nvram_lock(tp)) {
10774                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10775                                "tg3_nvram_init failed.\n", tp->dev->name);
10776                         return;
10777                 }
10778                 tg3_enable_nvram_access(tp);
10779
10780                 tp->nvram_size = 0;
10781
10782                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10783                         tg3_get_5752_nvram_info(tp);
10784                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10785                         tg3_get_5755_nvram_info(tp);
10786                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10787                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10788                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10789                         tg3_get_5787_nvram_info(tp);
10790                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10791                         tg3_get_5761_nvram_info(tp);
10792                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10793                         tg3_get_5906_nvram_info(tp);
10794                 else
10795                         tg3_get_nvram_info(tp);
10796
10797                 if (tp->nvram_size == 0)
10798                         tg3_get_nvram_size(tp);
10799
10800                 tg3_disable_nvram_access(tp);
10801                 tg3_nvram_unlock(tp);
10802
10803         } else {
10804                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10805
10806                 tg3_get_eeprom_size(tp);
10807         }
10808 }
10809
10810 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10811                                         u32 offset, u32 *val)
10812 {
10813         u32 tmp;
10814         int i;
10815
10816         if (offset > EEPROM_ADDR_ADDR_MASK ||
10817             (offset % 4) != 0)
10818                 return -EINVAL;
10819
10820         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10821                                         EEPROM_ADDR_DEVID_MASK |
10822                                         EEPROM_ADDR_READ);
10823         tw32(GRC_EEPROM_ADDR,
10824              tmp |
10825              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10826              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10827               EEPROM_ADDR_ADDR_MASK) |
10828              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10829
10830         for (i = 0; i < 1000; i++) {
10831                 tmp = tr32(GRC_EEPROM_ADDR);
10832
10833                 if (tmp & EEPROM_ADDR_COMPLETE)
10834                         break;
10835                 msleep(1);
10836         }
10837         if (!(tmp & EEPROM_ADDR_COMPLETE))
10838                 return -EBUSY;
10839
10840         *val = tr32(GRC_EEPROM_DATA);
10841         return 0;
10842 }
10843
10844 #define NVRAM_CMD_TIMEOUT 10000
10845
10846 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10847 {
10848         int i;
10849
10850         tw32(NVRAM_CMD, nvram_cmd);
10851         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10852                 udelay(10);
10853                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10854                         udelay(10);
10855                         break;
10856                 }
10857         }
10858         if (i == NVRAM_CMD_TIMEOUT) {
10859                 return -EBUSY;
10860         }
10861         return 0;
10862 }
10863
10864 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10865 {
10866         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10867             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10868             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10869            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10870             (tp->nvram_jedecnum == JEDEC_ATMEL))
10871
10872                 addr = ((addr / tp->nvram_pagesize) <<
10873                         ATMEL_AT45DB0X1B_PAGE_POS) +
10874                        (addr % tp->nvram_pagesize);
10875
10876         return addr;
10877 }
10878
10879 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10880 {
10881         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10882             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10883             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10884            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10885             (tp->nvram_jedecnum == JEDEC_ATMEL))
10886
10887                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10888                         tp->nvram_pagesize) +
10889                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10890
10891         return addr;
10892 }
10893
10894 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10895 {
10896         int ret;
10897
10898         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10899                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10900
10901         offset = tg3_nvram_phys_addr(tp, offset);
10902
10903         if (offset > NVRAM_ADDR_MSK)
10904                 return -EINVAL;
10905
10906         ret = tg3_nvram_lock(tp);
10907         if (ret)
10908                 return ret;
10909
10910         tg3_enable_nvram_access(tp);
10911
10912         tw32(NVRAM_ADDR, offset);
10913         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10914                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10915
10916         if (ret == 0)
10917                 *val = swab32(tr32(NVRAM_RDDATA));
10918
10919         tg3_disable_nvram_access(tp);
10920
10921         tg3_nvram_unlock(tp);
10922
10923         return ret;
10924 }
10925
10926 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10927 {
10928         u32 v;
10929         int res = tg3_nvram_read(tp, offset, &v);
10930         if (!res)
10931                 *val = cpu_to_le32(v);
10932         return res;
10933 }
10934
10935 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10936 {
10937         int err;
10938         u32 tmp;
10939
10940         err = tg3_nvram_read(tp, offset, &tmp);
10941         *val = swab32(tmp);
10942         return err;
10943 }
10944
10945 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10946                                     u32 offset, u32 len, u8 *buf)
10947 {
10948         int i, j, rc = 0;
10949         u32 val;
10950
10951         for (i = 0; i < len; i += 4) {
10952                 u32 addr;
10953                 __le32 data;
10954
10955                 addr = offset + i;
10956
10957                 memcpy(&data, buf + i, 4);
10958
10959                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10960
10961                 val = tr32(GRC_EEPROM_ADDR);
10962                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10963
10964                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10965                         EEPROM_ADDR_READ);
10966                 tw32(GRC_EEPROM_ADDR, val |
10967                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10968                         (addr & EEPROM_ADDR_ADDR_MASK) |
10969                         EEPROM_ADDR_START |
10970                         EEPROM_ADDR_WRITE);
10971
10972                 for (j = 0; j < 1000; j++) {
10973                         val = tr32(GRC_EEPROM_ADDR);
10974
10975                         if (val & EEPROM_ADDR_COMPLETE)
10976                                 break;
10977                         msleep(1);
10978                 }
10979                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10980                         rc = -EBUSY;
10981                         break;
10982                 }
10983         }
10984
10985         return rc;
10986 }
10987
/* offset and length are dword aligned */
/* Write @len bytes from @buf to unbuffered (directly erased) flash.
 * For each page touched: read the whole page into a bounce buffer,
 * merge in the new data, issue WREN + page ERASE, then program the
 * page back one dword at a time with FIRST/LAST framing.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;	/* pagesize is a power of two */
	u32 nvram_cmd;
	u8 *tmp;

	/* Bounce buffer holding one full flash page for read-modify-write. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Pre-read the entire page so untouched bytes survive
		 * the erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		/* NOTE(review): size = min(pagesize, len) does not account
		 * for page_off; if offset is not page-aligned and
		 * len >= pagesize, the memcpy below would run past the end
		 * of 'tmp'.  Looks like callers only pass page-aligned
		 * offsets — TODO confirm.
		 */
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);
		/* NOTE(review): 'buf' is never advanced, so a write spanning
		 * multiple pages would re-copy the first 'size' bytes of the
		 * caller's buffer into every page — verify multi-page writes
		 * against callers.
		 */

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, one dword per command,
		 * framing the burst with FIRST on the first dword and
		 * LAST on the final one.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error paths. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
11084
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash or EEPROM, one dword per
 * NVRAM command.  The controller buffers a page internally, so no
 * explicit erase is needed; FIRST/LAST flags delimit each page burst.
 * Returns 0 on success or the first failing command's error code.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* memcpy avoids an unaligned load from buf + i. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate the logical offset to the device address. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST on a page boundary or the very first dword;
		 * LAST on the final dword of a page or of the request.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-brand parts (outside the listed ASIC revs) need an
		 * explicit write-enable command before each page burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11139
11140 /* offset and length are dword aligned */
11141 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11142 {
11143         int ret;
11144
11145         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11146                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11147                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11148                 udelay(40);
11149         }
11150
11151         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11152                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11153         }
11154         else {
11155                 u32 grc_mode;
11156
11157                 ret = tg3_nvram_lock(tp);
11158                 if (ret)
11159                         return ret;
11160
11161                 tg3_enable_nvram_access(tp);
11162                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11163                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11164                         tw32(NVRAM_WRITE1, 0x406);
11165
11166                 grc_mode = tr32(GRC_MODE);
11167                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11168
11169                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11170                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11171
11172                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11173                                 buf);
11174                 }
11175                 else {
11176                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11177                                 buf);
11178                 }
11179
11180                 grc_mode = tr32(GRC_MODE);
11181                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11182
11183                 tg3_disable_nvram_access(tp);
11184                 tg3_nvram_unlock(tp);
11185         }
11186
11187         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11188                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11189                 udelay(40);
11190         }
11191
11192         return ret;
11193 }
11194
/* One entry of the hardcoded board table: maps a PCI (subsystem vendor,
 * subsystem device) pair to the PHY ID expected on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
11199
/* Board table consulted by lookup_by_subsys() when the PHY ID cannot be
 * read from the chip or NVRAM.  A phy_id of 0 marks serdes (fiber)
 * boards — see tg3_phy_probe().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11237
11238 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11239 {
11240         int i;
11241
11242         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11243                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11244                      tp->pdev->subsystem_vendor) &&
11245                     (subsys_id_to_phy_id[i].subsys_devid ==
11246                      tp->pdev->subsystem_device))
11247                         return &subsys_id_to_phy_id[i];
11248         }
11249         return NULL;
11250 }
11251
/* Probe the board configuration stored in NIC SRAM by the bootcode and
 * translate it into tp->phy_id, tp->led_ctrl and the tg3_flags* bits
 * (write-protect, WOL, ASF/APE, serdes, RGMII, ASPM workaround).
 * Runs at probe time; if no valid SRAM signature is found, the defaults
 * set at the top of the function are left in place.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults, overridden below if SRAM config is present. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 keeps its config in the VCPU shadow register instead of
	 * SRAM; handle it and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 exists only for bootcode versions 0x01..0xff on
		 * chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two packed SRAM halves
		 * into the driver's PHY_ID_* layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 cannot drive the shared-mode
			 * PHY LED bits.
			 */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Specific Arima boards set the WP bit but should
			 * not actually be write-protected.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		/* RGMII in-band signaling options (5785 only; cfg4 is
		 * zero elsewhere).
		 */
		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
}
11454
11455 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11456 {
11457         int i;
11458         u32 val;
11459
11460         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11461         tw32(OTP_CTRL, cmd);
11462
11463         /* Wait for up to 1 ms for command to execute. */
11464         for (i = 0; i < 100; i++) {
11465                 val = tr32(OTP_STATUS);
11466                 if (val & OTP_STATUS_CMD_DONE)
11467                         break;
11468                 udelay(10);
11469         }
11470
11471         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11472 }
11473
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails (0 doubles as the "no config"
 * sentinel for callers).
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word, high half of the second. */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11503
/* Identify the PHY attached to this chip and initialize tp->phy_id,
 * the serdes flags and the initial advertising mask.  Identification
 * falls back in order: live MII ID registers -> value already read from
 * NVRAM by tg3_get_eeprom_hw_cfg() -> hardcoded subsystem-ID table.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/PHYSID2 into the driver's PHY_ID_* layout. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A table phy_id of 0 denotes a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this runs the 5401 DSP init a second time when
	 * the first attempt succeeded — looks like a deliberate
	 * double-init quirk for this PHY; confirm before "cleaning up".
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11634
/* Extract the board part number from the Vital Product Data area into
 * tp->board_part_number.  VPD is read either from NVRAM (when the
 * EEPROM magic is present) or via the PCI VPD capability, then scanned
 * for the read-only resource tag (0x90) and its 'PN' keyword.  Falls
 * back to a fixed string if anything goes wrong.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM magic: read VPD through the PCI capability.
		 * NOTE(review): vpd_cap == 0 (capability absent) is not
		 * checked before use — presumably all supported boards
		 * expose VPD; confirm.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			/* Write the address, then poll bit 15 (the flag
			 * bit) until the read data is ready.
			 */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip the identifier-string (0x82) and read-write (0x91)
		 * resources; their 16-bit length follows the tag byte.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than the read-only resource (0x90) here
		 * means the VPD is malformed.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk keyword entries: 2-byte name, 1-byte length, data. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* board_part_number holds at most 24 chars. */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11735
11736 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11737 {
11738         u32 val;
11739
11740         if (tg3_nvram_read_swab(tp, offset, &val) ||
11741             (val & 0xfc000000) != 0x0c000000 ||
11742             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11743             val != 0)
11744                 return 0;
11745
11746         return 1;
11747 }
11748
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver, and, when ASF is enabled (and the APE is not), append the
 * ASF firmware version after it as ", <ver>".  Returns silently on any
 * NVRAM read failure or if the expected image layout is not found.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* NVRAM must begin with the expected magic value. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the firmware image offset; word 0x4 holds the
	 * load start address that in-image offsets are relative to.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Image must carry a valid header; word +8 is the (load-relative)
	 * offset of the version string.
	 */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Convert the load-relative version offset to an absolute NVRAM
	 * offset and copy 16 bytes of version string, 4 at a time.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF version when ASF is in use and the APE is
	 * not enabled.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory looking for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load start address; later parts
	 * store it in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Dereference the directory entry to the ASF image, validate its
	 * header, and fetch the version-string offset from word +8.
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " then the ASF version after the bootcode string. */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Copy only as much as still fits in tp->fw_ver. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how the loop exited. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11832
11833 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11834
11835 static int __devinit tg3_get_invariants(struct tg3 *tp)
11836 {
11837         static struct pci_device_id write_reorder_chipsets[] = {
11838                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11839                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11840                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11841                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11842                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11843                              PCI_DEVICE_ID_VIA_8385_0) },
11844                 { },
11845         };
11846         u32 misc_ctrl_reg;
11847         u32 cacheline_sz_reg;
11848         u32 pci_state_reg, grc_misc_cfg;
11849         u32 val;
11850         u16 pci_cmd;
11851         int err, pcie_cap;
11852
11853         /* Force memory write invalidate off.  If we leave it on,
11854          * then on 5700_BX chips we have to enable a workaround.
11855          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11856          * to match the cacheline size.  The Broadcom driver have this
11857          * workaround but turns MWI off all the times so never uses
11858          * it.  This seems to suggest that the workaround is insufficient.
11859          */
11860         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11861         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11862         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11863
11864         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11865          * has the register indirect write enable bit set before
11866          * we try to access any of the MMIO registers.  It is also
11867          * critical that the PCI-X hw workaround situation is decided
11868          * before that as well.
11869          */
11870         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11871                               &misc_ctrl_reg);
11872
11873         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11874                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11876                 u32 prod_id_asic_rev;
11877
11878                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11879                                       &prod_id_asic_rev);
11880                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11881         }
11882
11883         /* Wrong chip ID in 5752 A0. This code can be removed later
11884          * as A0 is not in production.
11885          */
11886         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11887                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11888
11889         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11890          * we need to disable memory and use config. cycles
11891          * only to access all registers. The 5702/03 chips
11892          * can mistakenly decode the special cycles from the
11893          * ICH chipsets as memory write cycles, causing corruption
11894          * of register and memory space. Only certain ICH bridges
11895          * will drive special cycles with non-zero data during the
11896          * address phase which can fall within the 5703's address
11897          * range. This is not an ICH bug as the PCI spec allows
11898          * non-zero address during special cycles. However, only
11899          * these ICH bridges are known to drive non-zero addresses
11900          * during special cycles.
11901          *
11902          * Since special cycles do not cross PCI bridges, we only
11903          * enable this workaround if the 5703 is on the secondary
11904          * bus of these ICH bridges.
11905          */
11906         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11907             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11908                 static struct tg3_dev_id {
11909                         u32     vendor;
11910                         u32     device;
11911                         u32     rev;
11912                 } ich_chipsets[] = {
11913                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11914                           PCI_ANY_ID },
11915                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11916                           PCI_ANY_ID },
11917                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11918                           0xa },
11919                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11920                           PCI_ANY_ID },
11921                         { },
11922                 };
11923                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11924                 struct pci_dev *bridge = NULL;
11925
11926                 while (pci_id->vendor != 0) {
11927                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11928                                                 bridge);
11929                         if (!bridge) {
11930                                 pci_id++;
11931                                 continue;
11932                         }
11933                         if (pci_id->rev != PCI_ANY_ID) {
11934                                 if (bridge->revision > pci_id->rev)
11935                                         continue;
11936                         }
11937                         if (bridge->subordinate &&
11938                             (bridge->subordinate->number ==
11939                              tp->pdev->bus->number)) {
11940
11941                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11942                                 pci_dev_put(bridge);
11943                                 break;
11944                         }
11945                 }
11946         }
11947
11948         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11949                 static struct tg3_dev_id {
11950                         u32     vendor;
11951                         u32     device;
11952                 } bridge_chipsets[] = {
11953                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11954                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11955                         { },
11956                 };
11957                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11958                 struct pci_dev *bridge = NULL;
11959
11960                 while (pci_id->vendor != 0) {
11961                         bridge = pci_get_device(pci_id->vendor,
11962                                                 pci_id->device,
11963                                                 bridge);
11964                         if (!bridge) {
11965                                 pci_id++;
11966                                 continue;
11967                         }
11968                         if (bridge->subordinate &&
11969                             (bridge->subordinate->number <=
11970                              tp->pdev->bus->number) &&
11971                             (bridge->subordinate->subordinate >=
11972                              tp->pdev->bus->number)) {
11973                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11974                                 pci_dev_put(bridge);
11975                                 break;
11976                         }
11977                 }
11978         }
11979
11980         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11981          * DMA addresses > 40-bit. This bridge may have other additional
11982          * 57xx devices behind it in some 4-port NIC designs for example.
11983          * Any tg3 device found behind the bridge will also need the 40-bit
11984          * DMA workaround.
11985          */
11986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11987             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11988                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11989                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11990                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11991         }
11992         else {
11993                 struct pci_dev *bridge = NULL;
11994
11995                 do {
11996                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11997                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11998                                                 bridge);
11999                         if (bridge && bridge->subordinate &&
12000                             (bridge->subordinate->number <=
12001                              tp->pdev->bus->number) &&
12002                             (bridge->subordinate->subordinate >=
12003                              tp->pdev->bus->number)) {
12004                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12005                                 pci_dev_put(bridge);
12006                                 break;
12007                         }
12008                 } while (bridge);
12009         }
12010
12011         /* Initialize misc host control in PCI block. */
12012         tp->misc_host_ctrl |= (misc_ctrl_reg &
12013                                MISC_HOST_CTRL_CHIPREV);
12014         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12015                                tp->misc_host_ctrl);
12016
12017         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12018                               &cacheline_sz_reg);
12019
12020         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12021         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12022         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12023         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12024
12025         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12026             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12027                 tp->pdev_peer = tg3_find_peer(tp);
12028
12029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12031             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12032             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12033             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12037             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12038                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12039
12040         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12041             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12042                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12043
12044         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12045                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12046                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12047                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12048                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12049                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12050                      tp->pdev_peer == tp->pdev))
12051                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12052
12053                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12054                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12055                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12056                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12057                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12058                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12059                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12060                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12061                 } else {
12062                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12064                                 ASIC_REV_5750 &&
12065                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12066                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12067                 }
12068         }
12069
12070         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12071              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12072                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12073
12074         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12075         if (pcie_cap != 0) {
12076                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12077
12078                 pcie_set_readrq(tp->pdev, 4096);
12079
12080                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12081                         u16 lnkctl;
12082
12083                         pci_read_config_word(tp->pdev,
12084                                              pcie_cap + PCI_EXP_LNKCTL,
12085                                              &lnkctl);
12086                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12087                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12088                 }
12089         }
12090
12091         /* If we have an AMD 762 or VIA K8T800 chipset, write
12092          * reordering to the mailbox registers done by the host
12093          * controller can cause major troubles.  We read back from
12094          * every mailbox register write to force the writes to be
12095          * posted to the chip in order.
12096          */
12097         if (pci_dev_present(write_reorder_chipsets) &&
12098             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12099                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12100
12101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12102             tp->pci_lat_timer < 64) {
12103                 tp->pci_lat_timer = 64;
12104
12105                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12106                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12107                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12108                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12109
12110                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12111                                        cacheline_sz_reg);
12112         }
12113
12114         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12115             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12116                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12117                 if (!tp->pcix_cap) {
12118                         printk(KERN_ERR PFX "Cannot find PCI-X "
12119                                             "capability, aborting.\n");
12120                         return -EIO;
12121                 }
12122         }
12123
12124         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12125                               &pci_state_reg);
12126
12127         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12128                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12129
12130                 /* If this is a 5700 BX chipset, and we are in PCI-X
12131                  * mode, enable register write workaround.
12132                  *
12133                  * The workaround is to use indirect register accesses
12134                  * for all chip writes not to mailbox registers.
12135                  */
12136                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12137                         u32 pm_reg;
12138
12139                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12140
12141                         /* The chip can have it's power management PCI config
12142                          * space registers clobbered due to this bug.
12143                          * So explicitly force the chip into D0 here.
12144                          */
12145                         pci_read_config_dword(tp->pdev,
12146                                               tp->pm_cap + PCI_PM_CTRL,
12147                                               &pm_reg);
12148                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12149                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12150                         pci_write_config_dword(tp->pdev,
12151                                                tp->pm_cap + PCI_PM_CTRL,
12152                                                pm_reg);
12153
12154                         /* Also, force SERR#/PERR# in PCI command. */
12155                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12156                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12157                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12158                 }
12159         }
12160
12161         /* 5700 BX chips need to have their TX producer index mailboxes
12162          * written twice to workaround a bug.
12163          */
12164         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12165                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12166
12167         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12168                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12169         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12170                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12171
12172         /* Chip-specific fixup from Broadcom driver */
12173         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12174             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12175                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12176                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12177         }
12178
12179         /* Default fast path register access methods */
12180         tp->read32 = tg3_read32;
12181         tp->write32 = tg3_write32;
12182         tp->read32_mbox = tg3_read32;
12183         tp->write32_mbox = tg3_write32;
12184         tp->write32_tx_mbox = tg3_write32;
12185         tp->write32_rx_mbox = tg3_write32;
12186
12187         /* Various workaround register access methods */
12188         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12189                 tp->write32 = tg3_write_indirect_reg32;
12190         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12191                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12192                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12193                 /*
12194                  * Back to back register writes can cause problems on these
12195                  * chips, the workaround is to read back all reg writes
12196                  * except those to mailbox regs.
12197                  *
12198                  * See tg3_write_indirect_reg32().
12199                  */
12200                 tp->write32 = tg3_write_flush_reg32;
12201         }
12202
12203
12204         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12205             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12206                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12207                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12208                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12209         }
12210
12211         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12212                 tp->read32 = tg3_read_indirect_reg32;
12213                 tp->write32 = tg3_write_indirect_reg32;
12214                 tp->read32_mbox = tg3_read_indirect_mbox;
12215                 tp->write32_mbox = tg3_write_indirect_mbox;
12216                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12217                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12218
12219                 iounmap(tp->regs);
12220                 tp->regs = NULL;
12221
12222                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12223                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12224                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12225         }
12226         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12227                 tp->read32_mbox = tg3_read32_mbox_5906;
12228                 tp->write32_mbox = tg3_write32_mbox_5906;
12229                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12230                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12231         }
12232
12233         if (tp->write32 == tg3_write_indirect_reg32 ||
12234             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12235              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12236               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12237                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12238
12239         /* Get eeprom hw config before calling tg3_set_power_state().
12240          * In particular, the TG3_FLG2_IS_NIC flag must be
12241          * determined before calling tg3_set_power_state() so that
12242          * we know whether or not to switch out of Vaux power.
12243          * When the flag is set, it means that GPIO1 is used for eeprom
12244          * write protect and also implies that it is a LOM where GPIOs
12245          * are not used to switch power.
12246          */
12247         tg3_get_eeprom_hw_cfg(tp);
12248
12249         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12250                 /* Allow reads and writes to the
12251                  * APE register and memory space.
12252                  */
12253                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12254                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12255                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12256                                        pci_state_reg);
12257         }
12258
12259         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12261             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12262                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12263
12264                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12265                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12266                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12267                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12268                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12269         }
12270
12271         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12272          * GPIO1 driven high will bring 5700's external PHY out of reset.
12273          * It is also used as eeprom write protect on LOMs.
12274          */
12275         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12276         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12277             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12278                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12279                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12280         /* Unused GPIO3 must be driven as output on 5752 because there
12281          * are no pull-up resistors on unused GPIO pins.
12282          */
12283         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12284                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12285
12286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12287                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12288
12289         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12290                 /* Turn off the debug UART. */
12291                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12292                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12293                         /* Keep VMain power. */
12294                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12295                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12296         }
12297
12298         /* Force the chip into D0. */
12299         err = tg3_set_power_state(tp, PCI_D0);
12300         if (err) {
12301                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12302                        pci_name(tp->pdev));
12303                 return err;
12304         }
12305
12306         /* 5700 B0 chips do not support checksumming correctly due
12307          * to hardware bugs.
12308          */
12309         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12310                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12311
12312         /* Derive initial jumbo mode from MTU assigned in
12313          * ether_setup() via the alloc_etherdev() call
12314          */
12315         if (tp->dev->mtu > ETH_DATA_LEN &&
12316             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12317                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12318
12319         /* Determine WakeOnLan speed to use. */
12320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12321             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12322             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12323             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12324                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12325         } else {
12326                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12327         }
12328
12329         /* A few boards don't want Ethernet@WireSpeed phy feature */
12330         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12331             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12332              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12333              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12334             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12335             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12336                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12337
12338         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12339             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12340                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12341         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12342                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12343
12344         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12345                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12346                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12347                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12348                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12349                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12350                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12351                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12352                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12353                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12354                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12355                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12356                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12357         }
12358
12359         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12360             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12361                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12362                 if (tp->phy_otp == 0)
12363                         tp->phy_otp = TG3_OTP_DEFAULT;
12364         }
12365
12366         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12367                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12368         else
12369                 tp->mi_mode = MAC_MI_MODE_BASE;
12370
12371         tp->coalesce_mode = 0;
12372         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12373             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12374                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12375
12376         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12377                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12378
12379         err = tg3_mdio_init(tp);
12380         if (err)
12381                 return err;
12382
12383         /* Initialize data/descriptor byte/word swapping. */
12384         val = tr32(GRC_MODE);
12385         val &= GRC_MODE_HOST_STACKUP;
12386         tw32(GRC_MODE, val | tp->grc_mode);
12387
12388         tg3_switch_clocks(tp);
12389
12390         /* Clear this out for sanity. */
12391         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12392
12393         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12394                               &pci_state_reg);
12395         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12396             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12397                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12398
12399                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12400                     chiprevid == CHIPREV_ID_5701_B0 ||
12401                     chiprevid == CHIPREV_ID_5701_B2 ||
12402                     chiprevid == CHIPREV_ID_5701_B5) {
12403                         void __iomem *sram_base;
12404
12405                         /* Write some dummy words into the SRAM status block
12406                          * area, see if it reads back correctly.  If the return
12407                          * value is bad, force enable the PCIX workaround.
12408                          */
12409                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12410
12411                         writel(0x00000000, sram_base);
12412                         writel(0x00000000, sram_base + 4);
12413                         writel(0xffffffff, sram_base + 4);
12414                         if (readl(sram_base) != 0x00000000)
12415                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12416                 }
12417         }
12418
12419         udelay(50);
12420         tg3_nvram_init(tp);
12421
12422         grc_misc_cfg = tr32(GRC_MISC_CFG);
12423         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12424
12425         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12426             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12427              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12428                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12429
12430         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12431             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12432                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12433         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12434                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12435                                       HOSTCC_MODE_CLRTICK_TXBD);
12436
12437                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12438                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12439                                        tp->misc_host_ctrl);
12440         }
12441
12442         /* Preserve the APE MAC_MODE bits */
12443         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12444                 tp->mac_mode = tr32(MAC_MODE) |
12445                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12446         else
12447                 tp->mac_mode = TG3_DEF_MAC_MODE;
12448
12449         /* these are limited to 10/100 only */
12450         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12451              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12452             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12453              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12454              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12455               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12456               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12457             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12458              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12459               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12460               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12461             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12462                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12463
12464         err = tg3_phy_probe(tp);
12465         if (err) {
12466                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12467                        pci_name(tp->pdev), err);
12468                 /* ... but do not return immediately ... */
12469                 tg3_mdio_fini(tp);
12470         }
12471
12472         tg3_read_partno(tp);
12473         tg3_read_fw_ver(tp);
12474
12475         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12476                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12477         } else {
12478                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12479                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12480                 else
12481                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12482         }
12483
12484         /* 5700 {AX,BX} chips have a broken status block link
12485          * change bit implementation, so we must use the
12486          * status register in those cases.
12487          */
12488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12489                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12490         else
12491                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12492
12493         /* The led_ctrl is set during tg3_phy_probe, here we might
12494          * have to force the link status polling mechanism based
12495          * upon subsystem IDs.
12496          */
12497         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12498             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12499             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12500                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12501                                   TG3_FLAG_USE_LINKCHG_REG);
12502         }
12503
12504         /* For all SERDES we poll the MAC status register. */
12505         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12506                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12507         else
12508                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12509
12510         /* All chips before 5787 can get confused if TX buffers
12511          * straddle the 4GB address boundary in some cases.
12512          */
12513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12514             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12515             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12516             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12518             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12519                 tp->dev->hard_start_xmit = tg3_start_xmit;
12520         else
12521                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12522
12523         tp->rx_offset = 2;
12524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12525             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12526                 tp->rx_offset = 0;
12527
12528         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12529
12530         /* Increment the rx prod index on the rx std ring by at most
12531          * 8 for these chips to workaround hw errata.
12532          */
12533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12536                 tp->rx_std_max_post = 8;
12537
12538         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12539                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12540                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12541
12542         return err;
12543 }
12544
12545 #ifdef CONFIG_SPARC
12546 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12547 {
12548         struct net_device *dev = tp->dev;
12549         struct pci_dev *pdev = tp->pdev;
12550         struct device_node *dp = pci_device_to_OF_node(pdev);
12551         const unsigned char *addr;
12552         int len;
12553
12554         addr = of_get_property(dp, "local-mac-address", &len);
12555         if (addr && len == 6) {
12556                 memcpy(dev->dev_addr, addr, 6);
12557                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12558                 return 0;
12559         }
12560         return -ENODEV;
12561 }
12562
12563 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12564 {
12565         struct net_device *dev = tp->dev;
12566
12567         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12568         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12569         return 0;
12570 }
12571 #endif
12572
/* Determine the device MAC address, trying sources in decreasing
 * order of trust: SPARC OpenFirmware property, the bootcode MAC
 * mailbox in NIC SRAM, NVRAM, and finally the live MAC address
 * registers.
 *
 * Returns 0 on success (dev->dev_addr and dev->perm_addr set),
 * -EINVAL if no valid ethernet address could be found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts: the second function's MAC lives at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): lock failure is answered with an NVRAM
		 * reset rather than an error — presumably to recover a
		 * wedged NVRAM arbitration; confirm against bootcode docs.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b in the top half is the bootcode's validity signature. */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		/* NVRAM words use the opposite byte order from the
		 * SRAM mailbox above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12647
12648 #define BOUNDARY_SINGLE_CACHELINE       1
12649 #define BOUNDARY_MULTI_CACHELINE        2
12650
/* Compute the DMA burst-boundary bits for the DMA_RW_CTRL register.
 *
 * @val: current DMA_RW_CTRL value; boundary bits are OR'd/merged in.
 *
 * PCI controllers on many RISC hosts disconnect when a device bursts
 * across a cache-line boundary, so on platforms where that matters
 * we pick read/write boundary settings based on the bus type (PCI,
 * PCI-X, PCI Express) and the configured PCI cache line size.  On
 * chips other than 5700/5701 that are not PCI Express, the boundary
 * bits have no effect, so @val is returned unchanged.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means unset, so
	 * assume the largest boundary (1024 bytes) to be safe.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture policy: which platforms benefit from
	 * boundary throttling at all, and how aggressively.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary encodings differ from plain PCI. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: match the boundary to the cache
		 * line size when a single-cacheline burst is wanted;
		 * the fallthroughs pick the next larger encoding.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12787
/* Run one DMA transfer between a host buffer and NIC SRAM via an
 * internal buffer descriptor placed in the chip's DMA descriptor
 * pool, then poll the completion FIFO for its arrival.
 *
 * @buf, @buf_dma: host test buffer and its bus/DMA address.
 * @size: transfer length in bytes.
 * @to_device: nonzero = host-to-NIC (read DMA engine),
 *             zero = NIC-to-host (write DMA engine).
 *
 * Returns 0 if the completion shows up within the poll window
 * (40 x 100us), -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce: clear completion FIFOs and DMA engine status, and
	 * keep the buffer manager / flow-through queues out of the way.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor.  nic_mbuf 0x00002100 is
	 * the SRAM offset of the test mbuf (tg3_test_dma's disabled
	 * verification code reads card RAM back at 0x2100).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word-by-word through the
	 * PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12868
12869 #define TEST_BUFFER_SIZE        0x2000
12870
12871 static int __devinit tg3_test_dma(struct tg3 *tp)
12872 {
12873         dma_addr_t buf_dma;
12874         u32 *buf, saved_dma_rwctrl;
12875         int ret;
12876
12877         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12878         if (!buf) {
12879                 ret = -ENOMEM;
12880                 goto out_nofree;
12881         }
12882
12883         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12884                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12885
12886         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12887
12888         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12889                 /* DMA read watermark not used on PCIE */
12890                 tp->dma_rwctrl |= 0x00180000;
12891         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12892                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12893                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12894                         tp->dma_rwctrl |= 0x003f0000;
12895                 else
12896                         tp->dma_rwctrl |= 0x003f000f;
12897         } else {
12898                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12899                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12900                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12901                         u32 read_water = 0x7;
12902
12903                         /* If the 5704 is behind the EPB bridge, we can
12904                          * do the less restrictive ONE_DMA workaround for
12905                          * better performance.
12906                          */
12907                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12908                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12909                                 tp->dma_rwctrl |= 0x8000;
12910                         else if (ccval == 0x6 || ccval == 0x7)
12911                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12912
12913                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12914                                 read_water = 4;
12915                         /* Set bit 23 to enable PCIX hw bug fix */
12916                         tp->dma_rwctrl |=
12917                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12918                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12919                                 (1 << 23);
12920                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12921                         /* 5780 always in PCIX mode */
12922                         tp->dma_rwctrl |= 0x00144000;
12923                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12924                         /* 5714 always in PCIX mode */
12925                         tp->dma_rwctrl |= 0x00148000;
12926                 } else {
12927                         tp->dma_rwctrl |= 0x001b000f;
12928                 }
12929         }
12930
12931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12933                 tp->dma_rwctrl &= 0xfffffff0;
12934
12935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12937                 /* Remove this if it causes problems for some boards. */
12938                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12939
12940                 /* On 5700/5701 chips, we need to set this bit.
12941                  * Otherwise the chip will issue cacheline transactions
12942                  * to streamable DMA memory with not all the byte
12943                  * enables turned on.  This is an error on several
12944                  * RISC PCI controllers, in particular sparc64.
12945                  *
12946                  * On 5703/5704 chips, this bit has been reassigned
12947                  * a different meaning.  In particular, it is used
12948                  * on those chips to enable a PCI-X workaround.
12949                  */
12950                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12951         }
12952
12953         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12954
12955 #if 0
12956         /* Unneeded, already done by tg3_get_invariants.  */
12957         tg3_switch_clocks(tp);
12958 #endif
12959
12960         ret = 0;
12961         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12962             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12963                 goto out;
12964
12965         /* It is best to perform DMA test with maximum write burst size
12966          * to expose the 5700/5701 write DMA bug.
12967          */
12968         saved_dma_rwctrl = tp->dma_rwctrl;
12969         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12970         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12971
12972         while (1) {
12973                 u32 *p = buf, i;
12974
12975                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12976                         p[i] = i;
12977
12978                 /* Send the buffer to the chip. */
12979                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12980                 if (ret) {
12981                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12982                         break;
12983                 }
12984
12985 #if 0
12986                 /* validate data reached card RAM correctly. */
12987                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12988                         u32 val;
12989                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12990                         if (le32_to_cpu(val) != p[i]) {
12991                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12992                                 /* ret = -ENODEV here? */
12993                         }
12994                         p[i] = 0;
12995                 }
12996 #endif
12997                 /* Now read it back. */
12998                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12999                 if (ret) {
13000                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13001
13002                         break;
13003                 }
13004
13005                 /* Verify it. */
13006                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13007                         if (p[i] == i)
13008                                 continue;
13009
13010                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13011                             DMA_RWCTRL_WRITE_BNDRY_16) {
13012                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13013                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13014                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13015                                 break;
13016                         } else {
13017                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13018                                 ret = -ENODEV;
13019                                 goto out;
13020                         }
13021                 }
13022
13023                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13024                         /* Success. */
13025                         ret = 0;
13026                         break;
13027                 }
13028         }
13029         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13030             DMA_RWCTRL_WRITE_BNDRY_16) {
13031                 static struct pci_device_id dma_wait_state_chipsets[] = {
13032                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13033                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13034                         { },
13035                 };
13036
13037                 /* DMA test passed without adjusting DMA boundary,
13038                  * now look for chipsets that are known to expose the
13039                  * DMA bug without failing the test.
13040                  */
13041                 if (pci_dev_present(dma_wait_state_chipsets)) {
13042                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13043                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13044                 }
13045                 else
13046                         /* Safe to use the calculated DMA boundary. */
13047                         tp->dma_rwctrl = saved_dma_rwctrl;
13048
13049                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13050         }
13051
13052 out:
13053         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13054 out_nofree:
13055         return ret;
13056 }
13057
/* Set tp->link_config to its power-on defaults: advertise every
 * 10/100/1000 half/full-duplex mode with autonegotiation enabled, and
 * mark all cached speed/duplex state as not-yet-known (INVALID).
 */
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	/* active_* reflect the negotiated link; unknown until link-up. */
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	/* orig_* presumably preserve user settings across power-state
	 * transitions; nothing has been configured yet — NOTE(review):
	 * confirm against tg3_set_power_state usage.
	 */
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
13075
/* Choose buffer-manager watermark defaults for this chip generation.
 * 5705-and-newer parts use smaller MBUF pools than the original Tigon3,
 * so they get their own set of read-DMA/MAC-RX/high watermarks; the
 * 5906 further overrides two of them.  Jumbo-frame watermarks are set
 * alongside the standard ones in both branches.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 needs different RX-low/high thresholds than the
		 * generic 5705-class values set just above.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		/* Pre-5705 chips: original Tigon3 watermark defaults. */
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are the same on all chips. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
13117
13118 static char * __devinit tg3_phy_string(struct tg3 *tp)
13119 {
13120         switch (tp->phy_id & PHY_ID_MASK) {
13121         case PHY_ID_BCM5400:    return "5400";
13122         case PHY_ID_BCM5401:    return "5401";
13123         case PHY_ID_BCM5411:    return "5411";
13124         case PHY_ID_BCM5701:    return "5701";
13125         case PHY_ID_BCM5703:    return "5703";
13126         case PHY_ID_BCM5704:    return "5704";
13127         case PHY_ID_BCM5705:    return "5705";
13128         case PHY_ID_BCM5750:    return "5750";
13129         case PHY_ID_BCM5752:    return "5752";
13130         case PHY_ID_BCM5714:    return "5714";
13131         case PHY_ID_BCM5780:    return "5780";
13132         case PHY_ID_BCM5755:    return "5755";
13133         case PHY_ID_BCM5787:    return "5787";
13134         case PHY_ID_BCM5784:    return "5784";
13135         case PHY_ID_BCM5756:    return "5722/5756";
13136         case PHY_ID_BCM5906:    return "5906";
13137         case PHY_ID_BCM5761:    return "5761";
13138         case PHY_ID_BCM8002:    return "8002/serdes";
13139         case 0:                 return "serdes";
13140         default:                return "unknown";
13141         }
13142 }
13143
13144 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13145 {
13146         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13147                 strcpy(str, "PCI Express");
13148                 return str;
13149         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13150                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13151
13152                 strcpy(str, "PCIX:");
13153
13154                 if ((clock_ctrl == 7) ||
13155                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13156                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13157                         strcat(str, "133MHz");
13158                 else if (clock_ctrl == 0)
13159                         strcat(str, "33MHz");
13160                 else if (clock_ctrl == 2)
13161                         strcat(str, "50MHz");
13162                 else if (clock_ctrl == 4)
13163                         strcat(str, "66MHz");
13164                 else if (clock_ctrl == 6)
13165                         strcat(str, "100MHz");
13166         } else {
13167                 strcpy(str, "PCI:");
13168                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13169                         strcat(str, "66MHz");
13170                 else
13171                         strcat(str, "33MHz");
13172         }
13173         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13174                 strcat(str, ":32-bit");
13175         else
13176                 strcat(str, ":64-bit");
13177         return str;
13178 }
13179
/* Find the sibling PCI function of a dual-port device by scanning all
 * eight functions of our slot for a device other than tp->pdev.
 *
 * Returns tp->pdev itself when no peer exists (e.g. a 5704 configured
 * in single-port mode).  The returned pointer carries no extra
 * reference — see the refcount note below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference pci_get_slot took on a miss
		 * (pci_dev_put(NULL) is a no-op).
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13207
/* Populate tp->coal with the driver's default interrupt-coalescing
 * parameters, then apply two chip-specific adjustments: CLRTICK-capable
 * coalesce modes use alternate tick values, and 5705-and-newer chips do
 * not support the per-IRQ tick or stats-block coalescing knobs.
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	/* CLRTICK modes need different tick values than the defaults
	 * chosen above.
	 */
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	/* 5705-class hardware lacks these knobs; zero them so ethtool
	 * reports them as unsupported.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
13238
/* PCI probe entry point.  Enables the device, maps its register BARs,
 * allocates and fills in the net_device, sizes the DMA masks, resets
 * leftover firmware state, runs the DMA self-test, and finally
 * registers the netdev.  Error paths unwind in strict reverse order of
 * acquisition via the err_out_* label chain at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory BAR; the chip is MMIO-only. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize driver-private state embedded in the netdev. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Detect chip revision, flags and quirks; needed before the
	 * DMA-mask decision below.
	 */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit streaming mask if the wide mask was
	 * rejected (or was never attempted).
	 */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO is always capable; a set
	 * of older chips (and ASF-enabled parts) get no TSO; everything
	 * else uses firmware TSO and carries the TSO_BUG workaround flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 without TSO on a slow bus runs with a shrunken RX
	 * ring (63 entries).
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE register BAR (BAR 2) when the management
	 * processor is present.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Success: report the device's identity and capabilities. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: reverse order of the acquisitions above. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13584
13585 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13586 {
13587         struct net_device *dev = pci_get_drvdata(pdev);
13588
13589         if (dev) {
13590                 struct tg3 *tp = netdev_priv(dev);
13591
13592                 flush_scheduled_work();
13593
13594                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13595                         tg3_phy_fini(tp);
13596                         tg3_mdio_fini(tp);
13597                 }
13598
13599                 unregister_netdev(dev);
13600                 if (tp->aperegs) {
13601                         iounmap(tp->aperegs);
13602                         tp->aperegs = NULL;
13603                 }
13604                 if (tp->regs) {
13605                         iounmap(tp->regs);
13606                         tp->regs = NULL;
13607                 }
13608                 free_netdev(dev);
13609                 pci_release_regions(pdev);
13610                 pci_disable_device(pdev);
13611                 pci_set_drvdata(pdev, NULL);
13612         }
13613 }
13614
/* PCI suspend entry point.  Saves PCI config state, then — if the
 * interface is up — stops the NIC, detaches the netdev, and moves the
 * chip to the target low-power state.  If the power transition fails,
 * the hardware is restarted so the interface keeps working, and the
 * original error is still returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Quiesce: pending work, PHY, NAPI/queues, then the timer. */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed: bring the hardware and the
		 * interface back up so the device remains usable.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY outside the lock, recovery permitting. */
		if (!err2)
			tg3_phy_start(tp);
	}

	/* Note: the original suspend error is returned even when the
	 * recovery restart above succeeded.
	 */
	return err;
}
13676
13677 static int tg3_resume(struct pci_dev *pdev)
13678 {
13679         struct net_device *dev = pci_get_drvdata(pdev);
13680         struct tg3 *tp = netdev_priv(dev);
13681         int err;
13682
13683         pci_restore_state(tp->pdev);
13684
13685         if (!netif_running(dev))
13686                 return 0;
13687
13688         err = tg3_set_power_state(tp, PCI_D0);
13689         if (err)
13690                 return err;
13691
13692         netif_device_attach(dev);
13693
13694         tg3_full_lock(tp, 0);
13695
13696         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13697         err = tg3_restart_hw(tp, 1);
13698         if (err)
13699                 goto out;
13700
13701         tp->timer.expires = jiffies + tp->timer_offset;
13702         add_timer(&tp->timer);
13703
13704         tg3_netif_start(tp);
13705
13706 out:
13707         tg3_full_unlock(tp);
13708
13709         if (!err)
13710                 tg3_phy_start(tp);
13711
13712         return err;
13713 }
13714
/* PCI driver glue: probe/remove plus suspend/resume entry points. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13723
/* Module load: register the PCI driver; probing happens per-device. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module unload: unregister the driver, removing all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);