tg3: Preserve register settings for DASH
linux-2.6: drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
59 #else
60 #define TG3_VLAN_TAG_USED 0
61 #endif
62
63 #define TG3_TSO_SUPPORT 1
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.93"
70 #define DRV_MODULE_RELDATE      "May 22, 2008"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself;
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
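/* Worked example of the mask trick described above: with TG3_TX_RING_SIZE
 * equal to 512 (a power of two), NEXT_TX(510) == 511 and
 * NEXT_TX(511) == (512 & 511) == 0, i.e. the same result as
 * '(N + 1) % TG3_TX_RING_SIZE' but without a divide instruction.
 */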
125
126 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
128
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
131
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134
135 #define TG3_NUM_TEST            6
136
137 static char version[] __devinitdata =
138         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
141 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
142 MODULE_LICENSE("GPL");
143 MODULE_VERSION(DRV_MODULE_VERSION);
144
145 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 module_param(tg3_debug, int, 0);
147 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
149 static struct pci_device_id tg3_pci_tbl[] = {
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
209         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
210         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
212         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
213         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
214         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
215         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
216         {}
217 };
218
219 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
220
221 static const struct {
222         const char string[ETH_GSTRING_LEN];
223 } ethtool_stats_keys[TG3_NUM_STATS] = {
224         { "rx_octets" },
225         { "rx_fragments" },
226         { "rx_ucast_packets" },
227         { "rx_mcast_packets" },
228         { "rx_bcast_packets" },
229         { "rx_fcs_errors" },
230         { "rx_align_errors" },
231         { "rx_xon_pause_rcvd" },
232         { "rx_xoff_pause_rcvd" },
233         { "rx_mac_ctrl_rcvd" },
234         { "rx_xoff_entered" },
235         { "rx_frame_too_long_errors" },
236         { "rx_jabbers" },
237         { "rx_undersize_packets" },
238         { "rx_in_length_errors" },
239         { "rx_out_length_errors" },
240         { "rx_64_or_less_octet_packets" },
241         { "rx_65_to_127_octet_packets" },
242         { "rx_128_to_255_octet_packets" },
243         { "rx_256_to_511_octet_packets" },
244         { "rx_512_to_1023_octet_packets" },
245         { "rx_1024_to_1522_octet_packets" },
246         { "rx_1523_to_2047_octet_packets" },
247         { "rx_2048_to_4095_octet_packets" },
248         { "rx_4096_to_8191_octet_packets" },
249         { "rx_8192_to_9022_octet_packets" },
250
251         { "tx_octets" },
252         { "tx_collisions" },
253
254         { "tx_xon_sent" },
255         { "tx_xoff_sent" },
256         { "tx_flow_control" },
257         { "tx_mac_errors" },
258         { "tx_single_collisions" },
259         { "tx_mult_collisions" },
260         { "tx_deferred" },
261         { "tx_excessive_collisions" },
262         { "tx_late_collisions" },
263         { "tx_collide_2times" },
264         { "tx_collide_3times" },
265         { "tx_collide_4times" },
266         { "tx_collide_5times" },
267         { "tx_collide_6times" },
268         { "tx_collide_7times" },
269         { "tx_collide_8times" },
270         { "tx_collide_9times" },
271         { "tx_collide_10times" },
272         { "tx_collide_11times" },
273         { "tx_collide_12times" },
274         { "tx_collide_13times" },
275         { "tx_collide_14times" },
276         { "tx_collide_15times" },
277         { "tx_ucast_packets" },
278         { "tx_mcast_packets" },
279         { "tx_bcast_packets" },
280         { "tx_carrier_sense_errors" },
281         { "tx_discards" },
282         { "tx_errors" },
283
284         { "dma_writeq_full" },
285         { "dma_write_prioq_full" },
286         { "rxbds_empty" },
287         { "rx_discards" },
288         { "rx_errors" },
289         { "rx_threshold_hit" },
290
291         { "dma_readq_full" },
292         { "dma_read_prioq_full" },
293         { "tx_comp_queue_full" },
294
295         { "ring_set_send_prod_index" },
296         { "ring_status_update" },
297         { "nic_irqs" },
298         { "nic_avoided_irqs" },
299         { "nic_tx_threshold_hit" }
300 };
301
302 static const struct {
303         const char string[ETH_GSTRING_LEN];
304 } ethtool_test_keys[TG3_NUM_TEST] = {
305         { "nvram test     (online) " },
306         { "link test      (online) " },
307         { "register test  (offline)" },
308         { "memory test    (offline)" },
309         { "loopback test  (offline)" },
310         { "interrupt test (offline)" },
311 };
312
313 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
314 {
315         writel(val, tp->regs + off);
316 }
317
318 static u32 tg3_read32(struct tg3 *tp, u32 off)
319 {
320         return readl(tp->regs + off);
321 }
322
323 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
324 {
325         writel(val, tp->aperegs + off);
326 }
327
328 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
329 {
330         return readl(tp->aperegs + off);
331 }
332
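/* Indirect register access: the target offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the value is then
 * transferred through TG3PCI_REG_DATA.  indirect_lock serializes the two
 * config-space accesses so concurrent users cannot interleave them.
 */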
333 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
334 {
335         unsigned long flags;
336
337         spin_lock_irqsave(&tp->indirect_lock, flags);
338         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
339         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
340         spin_unlock_irqrestore(&tp->indirect_lock, flags);
341 }
342
343 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
344 {
345         writel(val, tp->regs + off);
346         readl(tp->regs + off);
347 }
348
349 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
350 {
351         unsigned long flags;
352         u32 val;
353
354         spin_lock_irqsave(&tp->indirect_lock, flags);
355         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
356         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
357         spin_unlock_irqrestore(&tp->indirect_lock, flags);
358         return val;
359 }
360
361 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
362 {
363         unsigned long flags;
364
365         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
366                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
367                                        TG3_64BIT_REG_LOW, val);
368                 return;
369         }
370         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
371                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
372                                        TG3_64BIT_REG_LOW, val);
373                 return;
374         }
375
376         spin_lock_irqsave(&tp->indirect_lock, flags);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
379         spin_unlock_irqrestore(&tp->indirect_lock, flags);
380
381         /* In indirect mode when disabling interrupts, we also need
382          * to clear the interrupt bit in the GRC local ctrl register.
383          */
384         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
385             (val == 0x1)) {
386                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
387                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
388         }
389 }
390
391 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
392 {
393         unsigned long flags;
394         u32 val;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
398         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
399         spin_unlock_irqrestore(&tp->indirect_lock, flags);
400         return val;
401 }
402
403 /* usec_wait specifies the wait time in usec when writing to certain registers
404  * where it is unsafe to read back the register without some delay.
405  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
406  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
407  */
408 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
409 {
410         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
411             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
412                 /* Non-posted methods */
413                 tp->write32(tp, off, val);
414         else {
415                 /* Posted method */
416                 tg3_write32(tp, off, val);
417                 if (usec_wait)
418                         udelay(usec_wait);
419                 tp->read32(tp, off);
420         }
421         /* Wait again after the read for the posted method to guarantee that
422          * the wait time is met.
423          */
424         if (usec_wait)
425                 udelay(usec_wait);
426 }
427
428 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
429 {
430         tp->write32_mbox(tp, off, val);
431         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
432             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
433                 tp->read32_mbox(tp, off);
434 }
435
436 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
437 {
438         void __iomem *mbox = tp->regs + off;
439         writel(val, mbox);
440         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
441                 writel(val, mbox);
442         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
443                 readl(mbox);
444 }
445
446 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
447 {
448         return readl(tp->regs + off + GRCMBOX_BASE);
449 }
450
451 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
452 {
453         writel(val, tp->regs + off + GRCMBOX_BASE);
454 }
455
456 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
457 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
458 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
459 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
460 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
461
462 #define tw32(reg,val)           tp->write32(tp, reg, val)
463 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
464 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
465 #define tr32(reg)               tp->read32(tp, reg)
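/* Register access conventions used below: tw32()/tr32() go through the
 * tp->write32/tp->read32 function pointers, tw32_f() flushes the write
 * (reading the register back when posted writes are in use), and
 * tw32_wait_f() additionally delays for the requested number of
 * microseconds, e.g. the 40 usec waits on TG3PCI_CLOCK_CTRL in
 * tg3_switch_clocks() below.
 */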
466
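/* tg3_write_mem()/tg3_read_mem() access NIC on-board SRAM through the
 * memory window: the SRAM offset goes into TG3PCI_MEM_WIN_BASE_ADDR and
 * the data moves through TG3PCI_MEM_WIN_DATA, either via PCI config space
 * (TG3_FLAG_SRAM_USE_CONFIG) or via the memory-mapped registers.  The
 * window base is always restored to zero afterwards.
 */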
467 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
468 {
469         unsigned long flags;
470
471         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
472             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
473                 return;
474
475         spin_lock_irqsave(&tp->indirect_lock, flags);
476         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
477                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
478                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
479
480                 /* Always leave this as zero. */
481                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
482         } else {
483                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
484                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
485
486                 /* Always leave this as zero. */
487                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
488         }
489         spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 }
491
492 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
493 {
494         unsigned long flags;
495
496         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
497             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
498                 *val = 0;
499                 return;
500         }
501
502         spin_lock_irqsave(&tp->indirect_lock, flags);
503         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
504                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
505                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
506
507                 /* Always leave this as zero. */
508                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
509         } else {
510                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
511                 *val = tr32(TG3PCI_MEM_WIN_DATA);
512
513                 /* Always leave this as zero. */
514                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
515         }
516         spin_unlock_irqrestore(&tp->indirect_lock, flags);
517 }
518
519 static void tg3_ape_lock_init(struct tg3 *tp)
520 {
521         int i;
522
523         /* Make sure the driver isn't holding any stale locks. */
524         for (i = 0; i < 8; i++)
525                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
526                                 APE_LOCK_GRANT_DRIVER);
527 }
528
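/* APE lock handshake: request the lock by writing APE_LOCK_REQ_DRIVER to
 * the per-lock request register, then poll the matching grant register
 * (100 iterations of 10 usec, i.e. up to 1 msec).  If the grant never
 * arrives, the request is revoked via the grant register and -EBUSY is
 * returned.
 */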
529 static int tg3_ape_lock(struct tg3 *tp, int locknum)
530 {
531         int i, off;
532         int ret = 0;
533         u32 status;
534
535         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536                 return 0;
537
538         switch (locknum) {
539                 case TG3_APE_LOCK_GRC:
540                 case TG3_APE_LOCK_MEM:
541                         break;
542                 default:
543                         return -EINVAL;
544         }
545
546         off = 4 * locknum;
547
548         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
549
550         /* Wait for up to 1 millisecond to acquire lock. */
551         for (i = 0; i < 100; i++) {
552                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
553                 if (status == APE_LOCK_GRANT_DRIVER)
554                         break;
555                 udelay(10);
556         }
557
558         if (status != APE_LOCK_GRANT_DRIVER) {
559                 /* Revoke the lock request. */
560                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
561                                 APE_LOCK_GRANT_DRIVER);
562
563                 ret = -EBUSY;
564         }
565
566         return ret;
567 }
568
569 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
570 {
571         int off;
572
573         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
574                 return;
575
576         switch (locknum) {
577                 case TG3_APE_LOCK_GRC:
578                 case TG3_APE_LOCK_MEM:
579                         break;
580                 default:
581                         return;
582         }
583
584         off = 4 * locknum;
585         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
586 }
587
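/* Interrupts are masked in two places: the MASK_PCI_INT bit in
 * TG3PCI_MISC_HOST_CTRL and the interrupt mailbox.  Writing 1 to
 * MAILBOX_INTERRUPT_0 masks the interrupt, while writing last_tag << 24
 * (see tg3_enable_ints() and tg3_restart_ints()) unmasks it and tells the
 * chip which piece of work has been completed.
 */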
588 static void tg3_disable_ints(struct tg3 *tp)
589 {
590         tw32(TG3PCI_MISC_HOST_CTRL,
591              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
592         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
593 }
594
595 static inline void tg3_cond_int(struct tg3 *tp)
596 {
597         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
598             (tp->hw_status->status & SD_STATUS_UPDATED))
599                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
600         else
601                 tw32(HOSTCC_MODE, tp->coalesce_mode |
602                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
603 }
604
605 static void tg3_enable_ints(struct tg3 *tp)
606 {
607         tp->irq_sync = 0;
608         wmb();
609
610         tw32(TG3PCI_MISC_HOST_CTRL,
611              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
612         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
613                        (tp->last_tag << 24));
614         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
615                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
616                                (tp->last_tag << 24));
617         tg3_cond_int(tp);
618 }
619
620 static inline unsigned int tg3_has_work(struct tg3 *tp)
621 {
622         struct tg3_hw_status *sblk = tp->hw_status;
623         unsigned int work_exists = 0;
624
625         /* check for phy events */
626         if (!(tp->tg3_flags &
627               (TG3_FLAG_USE_LINKCHG_REG |
628                TG3_FLAG_POLL_SERDES))) {
629                 if (sblk->status & SD_STATUS_LINK_CHG)
630                         work_exists = 1;
631         }
632         /* check for RX/TX work to do */
633         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
634             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
635                 work_exists = 1;
636
637         return work_exists;
638 }
639
640 /* tg3_restart_ints
641  *  similar to tg3_enable_ints, but it accurately determines whether there
642  *  is new work pending and can return without flushing the PIO write
643  *  that re-enables interrupts.
644  */
645 static void tg3_restart_ints(struct tg3 *tp)
646 {
647         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
648                      tp->last_tag << 24);
649         mmiowb();
650
651         /* When doing tagged status, this work check is unnecessary.
652          * The last_tag we write above tells the chip which piece of
653          * work we've completed.
654          */
655         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
656             tg3_has_work(tp))
657                 tw32(HOSTCC_MODE, tp->coalesce_mode |
658                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
659 }
660
661 static inline void tg3_netif_stop(struct tg3 *tp)
662 {
663         tp->dev->trans_start = jiffies; /* prevent tx timeout */
664         napi_disable(&tp->napi);
665         netif_tx_disable(tp->dev);
666 }
667
668 static inline void tg3_netif_start(struct tg3 *tp)
669 {
670         netif_wake_queue(tp->dev);
671         /* NOTE: unconditional netif_wake_queue is only appropriate
672          * so long as all callers are assured to have free tx slots
673          * (such as after tg3_init_hw)
674          */
675         napi_enable(&tp->napi);
676         tp->hw_status->status |= SD_STATUS_UPDATED;
677         tg3_enable_ints(tp);
678 }
679
680 static void tg3_switch_clocks(struct tg3 *tp)
681 {
682         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
683         u32 orig_clock_ctrl;
684
685         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
686             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
687                 return;
688
689         orig_clock_ctrl = clock_ctrl;
690         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
691                        CLOCK_CTRL_CLKRUN_OENABLE |
692                        0x1f);
693         tp->pci_clock_ctrl = clock_ctrl;
694
695         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
696                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
697                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
698                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
699                 }
700         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
701                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
702                             clock_ctrl |
703                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
704                             40);
705                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
706                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
707                             40);
708         }
709         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
710 }
711
712 #define PHY_BUSY_LOOPS  5000
713
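/* MII access goes through the MAC's MI_COM register: the PHY address,
 * register number, command and (for writes) the data are packed into a
 * single frame, MI_COM_START kicks it off, and MI_COM_BUSY is polled
 * until the frame completes.  PHY_BUSY_LOOPS iterations of udelay(10)
 * bound the wait at roughly 50 msec.  Autopolling is temporarily
 * disabled around the access and restored afterwards.
 */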
714 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
715 {
716         u32 frame_val;
717         unsigned int loops;
718         int ret;
719
720         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
721                 tw32_f(MAC_MI_MODE,
722                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
723                 udelay(80);
724         }
725
726         *val = 0x0;
727
728         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
729                       MI_COM_PHY_ADDR_MASK);
730         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
731                       MI_COM_REG_ADDR_MASK);
732         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
733
734         tw32_f(MAC_MI_COM, frame_val);
735
736         loops = PHY_BUSY_LOOPS;
737         while (loops != 0) {
738                 udelay(10);
739                 frame_val = tr32(MAC_MI_COM);
740
741                 if ((frame_val & MI_COM_BUSY) == 0) {
742                         udelay(5);
743                         frame_val = tr32(MAC_MI_COM);
744                         break;
745                 }
746                 loops -= 1;
747         }
748
749         ret = -EBUSY;
750         if (loops != 0) {
751                 *val = frame_val & MI_COM_DATA_MASK;
752                 ret = 0;
753         }
754
755         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
756                 tw32_f(MAC_MI_MODE, tp->mi_mode);
757                 udelay(80);
758         }
759
760         return ret;
761 }
762
763 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
764 {
765         u32 frame_val;
766         unsigned int loops;
767         int ret;
768
769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
770             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
771                 return 0;
772
773         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
774                 tw32_f(MAC_MI_MODE,
775                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
776                 udelay(80);
777         }
778
779         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
780                       MI_COM_PHY_ADDR_MASK);
781         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
782                       MI_COM_REG_ADDR_MASK);
783         frame_val |= (val & MI_COM_DATA_MASK);
784         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
785
786         tw32_f(MAC_MI_COM, frame_val);
787
788         loops = PHY_BUSY_LOOPS;
789         while (loops != 0) {
790                 udelay(10);
791                 frame_val = tr32(MAC_MI_COM);
792                 if ((frame_val & MI_COM_BUSY) == 0) {
793                         udelay(5);
794                         frame_val = tr32(MAC_MI_COM);
795                         break;
796                 }
797                 loops -= 1;
798         }
799
800         ret = -EBUSY;
801         if (loops != 0)
802                 ret = 0;
803
804         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
805                 tw32_f(MAC_MI_MODE, tp->mi_mode);
806                 udelay(80);
807         }
808
809         return ret;
810 }
811
812 static int tg3_bmcr_reset(struct tg3 *tp)
813 {
814         u32 phy_control;
815         int limit, err;
816
817         /* OK, reset it, and poll the BMCR_RESET bit until it
818          * clears or we time out.
819          */
820         phy_control = BMCR_RESET;
821         err = tg3_writephy(tp, MII_BMCR, phy_control);
822         if (err != 0)
823                 return -EBUSY;
824
825         limit = 5000;
826         while (limit--) {
827                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
828                 if (err != 0)
829                         return -EBUSY;
830
831                 if ((phy_control & BMCR_RESET) == 0) {
832                         udelay(40);
833                         break;
834                 }
835                 udelay(10);
836         }
837         if (limit < 0)
838                 return -EBUSY;
839
840         return 0;
841 }
842
843 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
844 {
845         struct tg3 *tp = (struct tg3 *)bp->priv;
846         u32 val;
847
848         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
849                 return -EAGAIN;
850
851         if (tg3_readphy(tp, reg, &val))
852                 return -EIO;
853
854         return val;
855 }
856
857 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
858 {
859         struct tg3 *tp = (struct tg3 *)bp->priv;
860
861         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
862                 return -EAGAIN;
863
864         if (tg3_writephy(tp, reg, val))
865                 return -EIO;
866
867         return 0;
868 }
869
870 static int tg3_mdio_reset(struct mii_bus *bp)
871 {
872         return 0;
873 }
874
875 static void tg3_mdio_config(struct tg3 *tp)
876 {
877         u32 val;
878
879         if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
880             PHY_INTERFACE_MODE_RGMII)
881                 return;
882
883         val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
884                                     MAC_PHYCFG1_RGMII_SND_STAT_EN);
885         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
886                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
887                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
888                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
889                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
890         }
891         tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
892
893         val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
894         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
895                 val |= MAC_PHYCFG2_INBAND_ENABLE;
896         tw32(MAC_PHYCFG2, val);
897
898         val = tr32(MAC_EXT_RGMII_MODE);
899         val &= ~(MAC_RGMII_MODE_RX_INT_B |
900                  MAC_RGMII_MODE_RX_QUALITY |
901                  MAC_RGMII_MODE_RX_ACTIVITY |
902                  MAC_RGMII_MODE_RX_ENG_DET |
903                  MAC_RGMII_MODE_TX_ENABLE |
904                  MAC_RGMII_MODE_TX_LOWPWR |
905                  MAC_RGMII_MODE_TX_RESET);
906         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
907                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
908                         val |= MAC_RGMII_MODE_RX_INT_B |
909                                MAC_RGMII_MODE_RX_QUALITY |
910                                MAC_RGMII_MODE_RX_ACTIVITY |
911                                MAC_RGMII_MODE_RX_ENG_DET;
912                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
913                         val |= MAC_RGMII_MODE_TX_ENABLE |
914                                MAC_RGMII_MODE_TX_LOWPWR |
915                                MAC_RGMII_MODE_TX_RESET;
916         }
917         tw32(MAC_EXT_RGMII_MODE, val);
918 }
919
920 static void tg3_mdio_start(struct tg3 *tp)
921 {
922         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
923                 mutex_lock(&tp->mdio_bus.mdio_lock);
924                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
925                 mutex_unlock(&tp->mdio_bus.mdio_lock);
926         }
927
928         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
929         tw32_f(MAC_MI_MODE, tp->mi_mode);
930         udelay(80);
931
932         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
933                 tg3_mdio_config(tp);
934 }
935
936 static void tg3_mdio_stop(struct tg3 *tp)
937 {
938         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
939                 mutex_lock(&tp->mdio_bus.mdio_lock);
940                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
941                 mutex_unlock(&tp->mdio_bus.mdio_lock);
942         }
943 }
944
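/* When TG3_FLG3_USE_PHYLIB is set, tg3_mdio_init() registers an MDIO bus
 * with phylib.  Only the PHY at PHY_ADDR is exposed (all other addresses
 * are masked out), PHY interrupts are polled, and once the attached PHY
 * type is known the RGMII-specific MAC setup is applied via
 * tg3_mdio_config().
 */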
945 static int tg3_mdio_init(struct tg3 *tp)
946 {
947         int i;
948         u32 reg;
949         struct phy_device *phydev;
950         struct mii_bus *mdio_bus = &tp->mdio_bus;
951
952         tg3_mdio_start(tp);
953
954         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
955             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
956                 return 0;
957
958         memset(mdio_bus, 0, sizeof(*mdio_bus));
959
960         mdio_bus->name     = "tg3 mdio bus";
961         snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
962                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
963         mdio_bus->priv     = tp;
964         mdio_bus->dev      = &tp->pdev->dev;
965         mdio_bus->read     = &tg3_mdio_read;
966         mdio_bus->write    = &tg3_mdio_write;
967         mdio_bus->reset    = &tg3_mdio_reset;
968         mdio_bus->phy_mask = ~(1 << PHY_ADDR);
969         mdio_bus->irq      = &tp->mdio_irq[0];
970
971         for (i = 0; i < PHY_MAX_ADDR; i++)
972                 mdio_bus->irq[i] = PHY_POLL;
973
974         /* The bus registration will look for all the PHYs on the mdio bus.
975          * Unfortunately, it does not ensure the PHY is powered up before
976          * accessing the PHY ID registers.  A chip reset is the
977          * quickest way to bring the device back to an operational state.
978          */
979         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
980                 tg3_bmcr_reset(tp);
981
982         i = mdiobus_register(mdio_bus);
983         if (i) {
984                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
985                         tp->dev->name, i);
986                 return i;
987         }
988
989         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
990
991         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
992
993         switch (phydev->phy_id) {
994         case TG3_PHY_ID_BCM50610:
995                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
996                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
997                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
998                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
999                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1000                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1001                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1002                 break;
1003         case TG3_PHY_ID_BCMAC131:
1004                 phydev->interface = PHY_INTERFACE_MODE_MII;
1005                 break;
1006         }
1007
1008         tg3_mdio_config(tp);
1009
1010         return 0;
1011 }
1012
1013 static void tg3_mdio_fini(struct tg3 *tp)
1014 {
1015         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1016                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1017                 mdiobus_unregister(&tp->mdio_bus);
1018                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1019         }
1020 }
1021
1022 /* tp->lock is held. */
1023 static void tg3_wait_for_event_ack(struct tg3 *tp)
1024 {
1025         int i;
1026
1027         /* Wait for up to 2.5 seconds (250000 * 10 usec) */
1028         for (i = 0; i < 250000; i++) {
1029                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1030                         break;
1031                 udelay(10);
1032         }
1033 }
1034
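/* tg3_ump_link_report() passes the current MII state to the management
 * firmware on ASF-enabled 5780-class devices: a FWCMD_NICDRV_LINK_UPDATE
 * command is placed in the firmware mailbox, the BMCR/BMSR,
 * advertisement/link-partner and (except on MII serdes parts) 1000BASE-T
 * register pairs are copied into NIC_SRAM_FW_CMD_DATA_MBOX, and setting
 * GRC_RX_CPU_DRIVER_EVENT signals the RX CPU to process the command.
 */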
1035 /* tp->lock is held. */
1036 static void tg3_ump_link_report(struct tg3 *tp)
1037 {
1038         u32 reg;
1039         u32 val;
1040
1041         if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1042             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
1043                 return;
1044
1045         tg3_wait_for_event_ack(tp);
1046
1047         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1048
1049         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1050
1051         val = 0;
1052         if (!tg3_readphy(tp, MII_BMCR, &reg))
1053                 val = reg << 16;
1054         if (!tg3_readphy(tp, MII_BMSR, &reg))
1055                 val |= (reg & 0xffff);
1056         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1057
1058         val = 0;
1059         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1060                 val = reg << 16;
1061         if (!tg3_readphy(tp, MII_LPA, &reg))
1062                 val |= (reg & 0xffff);
1063         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1064
1065         val = 0;
1066         if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1067                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1068                         val = reg << 16;
1069                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1070                         val |= (reg & 0xffff);
1071         }
1072         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1073
1074         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1075                 val = reg << 16;
1076         else
1077                 val = 0;
1078         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1079
1080         val = tr32(GRC_RX_CPU_EVENT);
1081         val |= GRC_RX_CPU_DRIVER_EVENT;
1082         tw32_f(GRC_RX_CPU_EVENT, val);
1083 }
1084
1085 static void tg3_link_report(struct tg3 *tp)
1086 {
1087         if (!netif_carrier_ok(tp->dev)) {
1088                 if (netif_msg_link(tp))
1089                         printk(KERN_INFO PFX "%s: Link is down.\n",
1090                                tp->dev->name);
1091                 tg3_ump_link_report(tp);
1092         } else if (netif_msg_link(tp)) {
1093                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1094                        tp->dev->name,
1095                        (tp->link_config.active_speed == SPEED_1000 ?
1096                         1000 :
1097                         (tp->link_config.active_speed == SPEED_100 ?
1098                          100 : 10)),
1099                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1100                         "full" : "half"));
1101
1102                 printk(KERN_INFO PFX
1103                        "%s: Flow control is %s for TX and %s for RX.\n",
1104                        tp->dev->name,
1105                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1106                        "on" : "off",
1107                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1108                        "on" : "off");
1109                 tg3_ump_link_report(tp);
1110         }
1111 }
1112
1113 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1114 {
1115         u16 miireg;
1116
1117         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1118                 miireg = ADVERTISE_PAUSE_CAP;
1119         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1120                 miireg = ADVERTISE_PAUSE_ASYM;
1121         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1122                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1123         else
1124                 miireg = 0;
1125
1126         return miireg;
1127 }
1128
1129 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1130 {
1131         u16 miireg;
1132
1133         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1134                 miireg = ADVERTISE_1000XPAUSE;
1135         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1136                 miireg = ADVERTISE_1000XPSE_ASYM;
1137         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1138                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1139         else
1140                 miireg = 0;
1141
1142         return miireg;
1143 }
1144
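/* Pause resolution, essentially the truth table from IEEE 802.3 Annex 28B:
 *   both ends advertise symmetric pause          -> TX and RX pause
 *   local asym only, partner sym + asym          -> TX pause only
 *   local sym + asym, partner asym only          -> RX pause only
 *   anything else                                -> no pause
 * tg3_resolve_flowctrl_1000T() works on the copper ADVERTISE_/LPA_ bits,
 * tg3_resolve_flowctrl_1000X() on the equivalent 1000BASE-X bits.
 */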
1145 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1146 {
1147         u8 cap = 0;
1148
1149         if (lcladv & ADVERTISE_PAUSE_CAP) {
1150                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1151                         if (rmtadv & LPA_PAUSE_CAP)
1152                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1153                         else if (rmtadv & LPA_PAUSE_ASYM)
1154                                 cap = TG3_FLOW_CTRL_RX;
1155                 } else {
1156                         if (rmtadv & LPA_PAUSE_CAP)
1157                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1158                 }
1159         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1160                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1161                         cap = TG3_FLOW_CTRL_TX;
1162         }
1163
1164         return cap;
1165 }
1166
1167 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1168 {
1169         u8 cap = 0;
1170
1171         if (lcladv & ADVERTISE_1000XPAUSE) {
1172                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1173                         if (rmtadv & LPA_1000XPAUSE)
1174                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1175                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1176                                 cap = TG3_FLOW_CTRL_RX;
1177                 } else {
1178                         if (rmtadv & LPA_1000XPAUSE)
1179                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1180                 }
1181         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1182                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1183                         cap = TG3_FLOW_CTRL_TX;
1184         }
1185
1186         return cap;
1187 }
1188
1189 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1190 {
1191         u8 autoneg;
1192         u8 flowctrl = 0;
1193         u32 old_rx_mode = tp->rx_mode;
1194         u32 old_tx_mode = tp->tx_mode;
1195
1196         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1197                 autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
1198         else
1199                 autoneg = tp->link_config.autoneg;
1200
1201         if (autoneg == AUTONEG_ENABLE &&
1202             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1203                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1204                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1205                 else
1206                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1207         } else
1208                 flowctrl = tp->link_config.flowctrl;
1209
1210         tp->link_config.active_flowctrl = flowctrl;
1211
1212         if (flowctrl & TG3_FLOW_CTRL_RX)
1213                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1214         else
1215                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1216
1217         if (old_rx_mode != tp->rx_mode)
1218                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1219
1220         if (flowctrl & TG3_FLOW_CTRL_TX)
1221                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1222         else
1223                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1224
1225         if (old_tx_mode != tp->tx_mode)
1226                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1227 }
1228
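/* tg3_adjust_link() is the phylib link-change callback, registered through
 * phy_connect() in tg3_phy_init() below.  It re-derives MAC_MODE, flow
 * control and the MAC_TX_LENGTHS slot time from the PHY's current
 * speed/duplex settings and prints a link message when anything changed.
 */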
1229 static void tg3_adjust_link(struct net_device *dev)
1230 {
1231         u8 oldflowctrl, linkmesg = 0;
1232         u32 mac_mode, lcl_adv, rmt_adv;
1233         struct tg3 *tp = netdev_priv(dev);
1234         struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1235
1236         spin_lock(&tp->lock);
1237
1238         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1239                                     MAC_MODE_HALF_DUPLEX);
1240
1241         oldflowctrl = tp->link_config.active_flowctrl;
1242
1243         if (phydev->link) {
1244                 lcl_adv = 0;
1245                 rmt_adv = 0;
1246
1247                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1248                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1249                 else
1250                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1251
1252                 if (phydev->duplex == DUPLEX_HALF)
1253                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1254                 else {
1255                         lcl_adv = tg3_advert_flowctrl_1000T(
1256                                   tp->link_config.flowctrl);
1257
1258                         if (phydev->pause)
1259                                 rmt_adv = LPA_PAUSE_CAP;
1260                         if (phydev->asym_pause)
1261                                 rmt_adv |= LPA_PAUSE_ASYM;
1262                 }
1263
1264                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1265         } else
1266                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1267
1268         if (mac_mode != tp->mac_mode) {
1269                 tp->mac_mode = mac_mode;
1270                 tw32_f(MAC_MODE, tp->mac_mode);
1271                 udelay(40);
1272         }
1273
1274         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1275                 tw32(MAC_TX_LENGTHS,
1276                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1277                       (6 << TX_LENGTHS_IPG_SHIFT) |
1278                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1279         else
1280                 tw32(MAC_TX_LENGTHS,
1281                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1282                       (6 << TX_LENGTHS_IPG_SHIFT) |
1283                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1284
1285         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1286             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1287             phydev->speed != tp->link_config.active_speed ||
1288             phydev->duplex != tp->link_config.active_duplex ||
1289             oldflowctrl != tp->link_config.active_flowctrl)
1290             linkmesg = 1;
1291
1292         tp->link_config.active_speed = phydev->speed;
1293         tp->link_config.active_duplex = phydev->duplex;
1294
1295         spin_unlock(&tp->lock);
1296
1297         if (linkmesg)
1298                 tg3_link_report(tp);
1299 }
1300
1301 static int tg3_phy_init(struct tg3 *tp)
1302 {
1303         struct phy_device *phydev;
1304
1305         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1306                 return 0;
1307
1308         /* Bring the PHY back to a known state. */
1309         tg3_bmcr_reset(tp);
1310
1311         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1312
1313         /* Attach the MAC to the PHY. */
1314         phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1315                              phydev->dev_flags, phydev->interface);
1316         if (IS_ERR(phydev)) {
1317                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1318                 return PTR_ERR(phydev);
1319         }
1320
1321         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1322
1323         /* Mask with MAC supported features. */
1324         phydev->supported &= (PHY_GBIT_FEATURES |
1325                               SUPPORTED_Pause |
1326                               SUPPORTED_Asym_Pause);
1327
1328         phydev->advertising = phydev->supported;
1329
1330         printk(KERN_INFO
1331                "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1332                tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1333
1334         return 0;
1335 }
1336
1337 static void tg3_phy_start(struct tg3 *tp)
1338 {
1339         struct phy_device *phydev;
1340
1341         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1342                 return;
1343
1344         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1345
1346         if (tp->link_config.phy_is_low_power) {
1347                 tp->link_config.phy_is_low_power = 0;
1348                 phydev->speed = tp->link_config.orig_speed;
1349                 phydev->duplex = tp->link_config.orig_duplex;
1350                 phydev->autoneg = tp->link_config.orig_autoneg;
1351                 phydev->advertising = tp->link_config.orig_advertising;
1352         }
1353
1354         phy_start(phydev);
1355
1356         phy_start_aneg(phydev);
1357 }
1358
1359 static void tg3_phy_stop(struct tg3 *tp)
1360 {
1361         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1362                 return;
1363
1364         phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1365 }
1366
1367 static void tg3_phy_fini(struct tg3 *tp)
1368 {
1369         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1370                 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1371                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1372         }
1373 }
1374
1375 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1376 {
1377         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1378         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1379 }
1380
1381 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1382 {
1383         u32 phy;
1384
1385         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1386             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1387                 return;
1388
1389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1390                 u32 ephy;
1391
1392                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1393                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1394                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1395                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1396                                 if (enable)
1397                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1398                                 else
1399                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1400                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1401                         }
1402                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1403                 }
1404         } else {
1405                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1406                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1407                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1408                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1409                         if (enable)
1410                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1411                         else
1412                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1413                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1414                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1415                 }
1416         }
1417 }
1418
1419 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1420 {
1421         u32 val;
1422
1423         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1424                 return;
1425
1426         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1427             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1428                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1429                              (val | (1 << 15) | (1 << 4)));
1430 }
1431
1432 static void tg3_phy_apply_otp(struct tg3 *tp)
1433 {
1434         u32 otp, phy;
1435
1436         if (!tp->phy_otp)
1437                 return;
1438
1439         otp = tp->phy_otp;
1440
1441         /* Enable SM_DSP clock and tx 6dB coding. */
1442         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1443               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1444               MII_TG3_AUXCTL_ACTL_TX_6DB;
1445         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1446
1447         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1448         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1449         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1450
1451         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1452               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1453         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1454
1455         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1456         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1457         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1458
1459         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1460         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1461
1462         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1463         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1464
1465         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1466               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1467         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1468
1469         /* Turn off SM_DSP clock. */
1470         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1471               MII_TG3_AUXCTL_ACTL_TX_6DB;
1472         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1473 }
1474
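/* Poll PHY register 0x16 until the DSP macro-busy bit (0x1000) clears.
 * Returns -EBUSY if the bit never clears within the retry budget.
 */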
1475 static int tg3_wait_macro_done(struct tg3 *tp)
1476 {
1477         int limit = 100;
1478
1479         while (limit--) {
1480                 u32 tmp32;
1481
1482                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1483                         if ((tmp32 & 0x1000) == 0)
1484                                 break;
1485                 }
1486         }
1487         if (limit < 0)
1488                 return -EBUSY;
1489
1490         return 0;
1491 }
1492
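/* Write a known test pattern into each of the four DSP channels and read
 * it back.  A macro timeout asks the caller (via *resetp) to reset the
 * PHY; any timeout or readback mismatch returns -EBUSY.
 */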
1493 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1494 {
1495         static const u32 test_pat[4][6] = {
1496         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1497         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1498         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1499         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1500         };
1501         int chan;
1502
1503         for (chan = 0; chan < 4; chan++) {
1504                 int i;
1505
1506                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1507                              (chan * 0x2000) | 0x0200);
1508                 tg3_writephy(tp, 0x16, 0x0002);
1509
1510                 for (i = 0; i < 6; i++)
1511                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1512                                      test_pat[chan][i]);
1513
1514                 tg3_writephy(tp, 0x16, 0x0202);
1515                 if (tg3_wait_macro_done(tp)) {
1516                         *resetp = 1;
1517                         return -EBUSY;
1518                 }
1519
1520                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1521                              (chan * 0x2000) | 0x0200);
1522                 tg3_writephy(tp, 0x16, 0x0082);
1523                 if (tg3_wait_macro_done(tp)) {
1524                         *resetp = 1;
1525                         return -EBUSY;
1526                 }
1527
1528                 tg3_writephy(tp, 0x16, 0x0802);
1529                 if (tg3_wait_macro_done(tp)) {
1530                         *resetp = 1;
1531                         return -EBUSY;
1532                 }
1533
1534                 for (i = 0; i < 6; i += 2) {
1535                         u32 low, high;
1536
1537                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1538                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1539                             tg3_wait_macro_done(tp)) {
1540                                 *resetp = 1;
1541                                 return -EBUSY;
1542                         }
1543                         low &= 0x7fff;
1544                         high &= 0x000f;
1545                         if (low != test_pat[chan][i] ||
1546                             high != test_pat[chan][i+1]) {
1547                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1548                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1549                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1550
1551                                 return -EBUSY;
1552                         }
1553                 }
1554         }
1555
1556         return 0;
1557 }
1558
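/* Clear the test pattern back out of all four DSP channels. */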
1559 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1560 {
1561         int chan;
1562
1563         for (chan = 0; chan < 4; chan++) {
1564                 int i;
1565
1566                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1567                              (chan * 0x2000) | 0x0200);
1568                 tg3_writephy(tp, 0x16, 0x0002);
1569                 for (i = 0; i < 6; i++)
1570                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1571                 tg3_writephy(tp, 0x16, 0x0202);
1572                 if (tg3_wait_macro_done(tp))
1573                         return -EBUSY;
1574         }
1575
1576         return 0;
1577 }
1578
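/* Reset workaround for 5703/5704/5705 PHYs: force 1000/full master mode,
 * block PHY control access and retry until the DSP test patterns verify,
 * then restore the original MII_TG3_CTRL and EXT_CTRL settings.
 */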
1579 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1580 {
1581         u32 reg32, phy9_orig;
1582         int retries, do_phy_reset, err;
1583
1584         retries = 10;
1585         do_phy_reset = 1;
1586         do {
1587                 if (do_phy_reset) {
1588                         err = tg3_bmcr_reset(tp);
1589                         if (err)
1590                                 return err;
1591                         do_phy_reset = 0;
1592                 }
1593
1594                 /* Disable transmitter and interrupt.  */
1595                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1596                         continue;
1597
1598                 reg32 |= 0x3000;
1599                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1600
1601                 /* Set full-duplex, 1000 Mbps.  */
1602                 tg3_writephy(tp, MII_BMCR,
1603                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1604
1605                 /* Set to master mode.  */
1606                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1607                         continue;
1608
1609                 tg3_writephy(tp, MII_TG3_CTRL,
1610                              (MII_TG3_CTRL_AS_MASTER |
1611                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1612
1613                 /* Enable SM_DSP_CLOCK and 6dB.  */
1614                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1615
1616                 /* Block the PHY control access.  */
1617                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1618                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1619
1620                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1621                 if (!err)
1622                         break;
1623         } while (--retries);
1624
1625         err = tg3_phy_reset_chanpat(tp);
1626         if (err)
1627                 return err;
1628
1629         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1630         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1631
1632         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1633         tg3_writephy(tp, 0x16, 0x0000);
1634
1635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1637                 /* Set Extended packet length bit for jumbo frames */
1638                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1639         } else {
1641                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1642         }
1643
1644         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1645
1646         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1647                 reg32 &= ~0x3000;
1648                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1649         } else if (!err)
1650                 err = -EBUSY;
1651
1652         return err;
1653 }
1654
1655 /* Reset the tigon3 PHY and reapply the chip-specific register and
1656  * DSP workarounds that a reset wipes out.
1657  */
1658 static int tg3_phy_reset(struct tg3 *tp)
1659 {
1660         u32 cpmuctrl;
1661         u32 phy_status;
1662         int err;
1663
1664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1665                 u32 val;
1666
1667                 val = tr32(GRC_MISC_CFG);
1668                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1669                 udelay(40);
1670         }
1671         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1672         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1673         if (err != 0)
1674                 return -EBUSY;
1675
1676         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1677                 netif_carrier_off(tp->dev);
1678                 tg3_link_report(tp);
1679         }
1680
1681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1684                 err = tg3_phy_reset_5703_4_5(tp);
1685                 if (err)
1686                         return err;
1687                 goto out;
1688         }
1689
1690         cpmuctrl = 0;
1691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1692             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1693                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1694                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1695                         tw32(TG3_CPMU_CTRL,
1696                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1697         }
1698
1699         err = tg3_bmcr_reset(tp);
1700         if (err)
1701                 return err;
1702
1703         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1704                 u32 phy;
1705
1706                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1707                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1708
1709                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1710         }
1711
1712         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1713                 u32 val;
1714
1715                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1716                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1717                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1718                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1719                         udelay(40);
1720                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1721                 }
1722
1723                 /* Disable GPHY autopowerdown. */
1724                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1725                              MII_TG3_MISC_SHDW_WREN |
1726                              MII_TG3_MISC_SHDW_APD_SEL |
1727                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1728         }
1729
1730         tg3_phy_apply_otp(tp);
1731
1732 out:
1733         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1734                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1735                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1736                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1737                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1738                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1739                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1740         }
1741         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1742                 tg3_writephy(tp, 0x1c, 0x8d68);
1743                 tg3_writephy(tp, 0x1c, 0x8d68);
1744         }
1745         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1746                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1747                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1748                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1749                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1750                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1751                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1752                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1753                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1754         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1756                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1757                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1758                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1759                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1760                         tg3_writephy(tp, MII_TG3_TEST1,
1761                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1762                 } else
1763                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1764                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1765         }
1766         /* Set Extended packet length bit (bit 14) on all chips
1767          * that support jumbo frames. */
1768         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1769                 /* Cannot do read-modify-write on 5401 */
1770                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1771         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1772                 u32 phy_reg;
1773
1774                 /* Set bit 14 with read-modify-write to preserve other bits */
1775                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1776                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1777                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1778         }
1779
1780         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
1781          * jumbo frame transmission.
1782          */
1783         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1784                 u32 phy_reg;
1785
1786                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1787                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1788                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1789         }
1790
1791         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1792                 /* adjust output voltage */
1793                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1794         }
1795
1796         tg3_phy_toggle_automdix(tp, 1);
1797         tg3_phy_set_wirespeed(tp);
1798         return 0;
1799 }
1800
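/* Drive the GRC local-control GPIOs so that auxiliary power stays available
 * whenever this port, or its 5704/5714 peer, needs it for WOL or ASF, and
 * release it otherwise.
 */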
1801 static void tg3_frob_aux_power(struct tg3 *tp)
1802 {
1803         struct tg3 *tp_peer = tp;
1804
1805         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1806                 return;
1807
1808         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1809             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1810                 struct net_device *dev_peer;
1811
1812                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1813                 /* remove_one() may have been run on the peer. */
1814                 if (!dev_peer)
1815                         tp_peer = tp;
1816                 else
1817                         tp_peer = netdev_priv(dev_peer);
1818         }
1819
1820         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1821             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1822             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1823             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1824                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1825                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1826                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1827                                     (GRC_LCLCTRL_GPIO_OE0 |
1828                                      GRC_LCLCTRL_GPIO_OE1 |
1829                                      GRC_LCLCTRL_GPIO_OE2 |
1830                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1831                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1832                                     100);
1833                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1834                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1835                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1836                                              GRC_LCLCTRL_GPIO_OE1 |
1837                                              GRC_LCLCTRL_GPIO_OE2 |
1838                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1839                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1840                                              tp->grc_local_ctrl;
1841                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1842
1843                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1844                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1845
1846                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1847                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1848                 } else {
1849                         u32 no_gpio2;
1850                         u32 grc_local_ctrl = 0;
1851
1852                         if (tp_peer != tp &&
1853                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1854                                 return;
1855
1856                         /* Workaround to prevent overdrawing Amps. */
1857                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1858                             ASIC_REV_5714) {
1859                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1860                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1861                                             grc_local_ctrl, 100);
1862                         }
1863
1864                         /* On 5753 and variants, GPIO2 cannot be used. */
1865                         no_gpio2 = tp->nic_sram_data_cfg &
1866                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1867
1868                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1869                                          GRC_LCLCTRL_GPIO_OE1 |
1870                                          GRC_LCLCTRL_GPIO_OE2 |
1871                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1872                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1873                         if (no_gpio2) {
1874                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1875                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1876                         }
1877                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1878                                                     grc_local_ctrl, 100);
1879
1880                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1881
1882                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1883                                                     grc_local_ctrl, 100);
1884
1885                         if (!no_gpio2) {
1886                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1887                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1888                                             grc_local_ctrl, 100);
1889                         }
1890                 }
1891         } else {
1892                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1893                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1894                         if (tp_peer != tp &&
1895                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1896                                 return;
1897
1898                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1899                                     (GRC_LCLCTRL_GPIO_OE1 |
1900                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1901
1902                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1903                                     GRC_LCLCTRL_GPIO_OE1, 100);
1904
1905                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1906                                     (GRC_LCLCTRL_GPIO_OE1 |
1907                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1908                 }
1909         }
1910 }
1911
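/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given link
 * speed on 5700-class boards, based on the LED mode and whether the
 * external PHY is a 5411.
 */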
1912 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1913 {
1914         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1915                 return 1;
1916         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1917                 if (speed != SPEED_10)
1918                         return 1;
1919         } else if (speed == SPEED_10)
1920                 return 1;
1921
1922         return 0;
1923 }
1924
1925 static int tg3_setup_phy(struct tg3 *, int);
1926
1927 #define RESET_KIND_SHUTDOWN     0
1928 #define RESET_KIND_INIT         1
1929 #define RESET_KIND_SUSPEND      2
1930
1931 static void tg3_write_sig_post_reset(struct tg3 *, int);
1932 static int tg3_halt_cpu(struct tg3 *, u32);
1933 static int tg3_nvram_lock(struct tg3 *);
1934 static void tg3_nvram_unlock(struct tg3 *);
1935
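/* Put the PHY into its lowest safe power state: serdes parts only get the
 * 5704 SG_DIG/SERDES_CFG tweak, the 5906 EPHY goes into IDDQ, a few chips
 * are skipped entirely because of hardware bugs, and everything else is
 * powered down through BMCR_PDOWN.
 */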
1936 static void tg3_power_down_phy(struct tg3 *tp)
1937 {
1938         u32 val;
1939
1940         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1941                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1942                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1943                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1944
1945                         sg_dig_ctrl |=
1946                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1947                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1948                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1949                 }
1950                 return;
1951         }
1952
1953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1954                 tg3_bmcr_reset(tp);
1955                 val = tr32(GRC_MISC_CFG);
1956                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1957                 udelay(40);
1958                 return;
1959         } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1960                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1961                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1963         }
1964
1965         /* The PHY should not be powered down on some chips because
1966          * of bugs.
1967          */
1968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1969             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1970             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1971              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1972                 return;
1973
1974         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1975                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1976                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1977                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1978                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1979         }
1980
1981         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1982 }
1983
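/* Move the device to the requested PCI power state.  D0 simply restores
 * register access and switches off Vaux; the D1-D3hot path records and
 * restricts the link configuration, arms WOL (MAC mode and the WOL
 * mailbox), gates the core clocks, powers down the PHY when nothing needs
 * it, and finally writes the new state to the PCI core.
 */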
1984 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1985 {
1986         u32 misc_host_ctrl;
1987
1988         /* Make sure register accesses (indirect or otherwise)
1989          * will function correctly.
1990          */
1991         pci_write_config_dword(tp->pdev,
1992                                TG3PCI_MISC_HOST_CTRL,
1993                                tp->misc_host_ctrl);
1994
1995         switch (state) {
1996         case PCI_D0:
1997                 pci_enable_wake(tp->pdev, state, false);
1998                 pci_set_power_state(tp->pdev, PCI_D0);
1999
2000                 /* Switch out of Vaux if it is a NIC */
2001                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2002                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2003
2004                 return 0;
2005
2006         case PCI_D1:
2007         case PCI_D2:
2008         case PCI_D3hot:
2009                 break;
2010
2011         default:
2012                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2013                         tp->dev->name, state);
2014                 return -EINVAL;
2015         }
2016         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2017         tw32(TG3PCI_MISC_HOST_CTRL,
2018              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2019
2020         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2021                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2022                     !tp->link_config.phy_is_low_power) {
2023                         struct phy_device *phydev;
2024                         u32 advertising;
2025
2026                         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
2027
2028                         tp->link_config.phy_is_low_power = 1;
2029
2030                         tp->link_config.orig_speed = phydev->speed;
2031                         tp->link_config.orig_duplex = phydev->duplex;
2032                         tp->link_config.orig_autoneg = phydev->autoneg;
2033                         tp->link_config.orig_advertising = phydev->advertising;
2034
2035                         advertising = ADVERTISED_TP |
2036                                       ADVERTISED_Pause |
2037                                       ADVERTISED_Autoneg |
2038                                       ADVERTISED_10baseT_Half;
2039
2040                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2041                             (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2042                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2043                                         advertising |=
2044                                                 ADVERTISED_100baseT_Half |
2045                                                 ADVERTISED_100baseT_Full |
2046                                                 ADVERTISED_10baseT_Full;
2047                                 else
2048                                         advertising |= ADVERTISED_10baseT_Full;
2049                         }
2050
2051                         phydev->advertising = advertising;
2052
2053                         phy_start_aneg(phydev);
2054                 }
2055         } else {
2056                 if (tp->link_config.phy_is_low_power == 0) {
2057                         tp->link_config.phy_is_low_power = 1;
2058                         tp->link_config.orig_speed = tp->link_config.speed;
2059                         tp->link_config.orig_duplex = tp->link_config.duplex;
2060                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2061                 }
2062
2063                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2064                         tp->link_config.speed = SPEED_10;
2065                         tp->link_config.duplex = DUPLEX_HALF;
2066                         tp->link_config.autoneg = AUTONEG_ENABLE;
2067                         tg3_setup_phy(tp, 0);
2068                 }
2069         }
2070
2071         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2072                 u32 val;
2073
2074                 val = tr32(GRC_VCPU_EXT_CTRL);
2075                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2076         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2077                 int i;
2078                 u32 val;
2079
2080                 for (i = 0; i < 200; i++) {
2081                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2082                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2083                                 break;
2084                         msleep(1);
2085                 }
2086         }
2087         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2088                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2089                                                      WOL_DRV_STATE_SHUTDOWN |
2090                                                      WOL_DRV_WOL |
2091                                                      WOL_SET_MAGIC_PKT);
2092
2093         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2094                 u32 mac_mode;
2095
2096                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2097                         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2098                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2099                                 udelay(40);
2100                         }
2101
2102                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2103                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2104                         else
2105                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2106
2107                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2108                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2109                             ASIC_REV_5700) {
2110                                 u32 speed = (tp->tg3_flags &
2111                                              TG3_FLAG_WOL_SPEED_100MB) ?
2112                                              SPEED_100 : SPEED_10;
2113                                 if (tg3_5700_link_polarity(tp, speed))
2114                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2115                                 else
2116                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2117                         }
2118                 } else {
2119                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2120                 }
2121
2122                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2123                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2124
2125                 if (pci_pme_capable(tp->pdev, state) &&
2126                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
2127                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2128
2129                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2130                         mac_mode |= tp->mac_mode &
2131                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2132                         if (mac_mode & MAC_MODE_APE_TX_EN)
2133                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2134                 }
2135
2136                 tw32_f(MAC_MODE, mac_mode);
2137                 udelay(100);
2138
2139                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2140                 udelay(10);
2141         }
2142
2143         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2144             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2145              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2146                 u32 base_val;
2147
2148                 base_val = tp->pci_clock_ctrl;
2149                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2150                              CLOCK_CTRL_TXCLK_DISABLE);
2151
2152                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2153                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2154         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2155                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2156                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2157                 /* do nothing */
2158         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2159                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2160                 u32 newbits1, newbits2;
2161
2162                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2163                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2164                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2165                                     CLOCK_CTRL_TXCLK_DISABLE |
2166                                     CLOCK_CTRL_ALTCLK);
2167                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2168                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2169                         newbits1 = CLOCK_CTRL_625_CORE;
2170                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2171                 } else {
2172                         newbits1 = CLOCK_CTRL_ALTCLK;
2173                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2174                 }
2175
2176                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2177                             40);
2178
2179                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2180                             40);
2181
2182                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2183                         u32 newbits3;
2184
2185                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2186                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2187                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2188                                             CLOCK_CTRL_TXCLK_DISABLE |
2189                                             CLOCK_CTRL_44MHZ_CORE);
2190                         } else {
2191                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2192                         }
2193
2194                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2195                                     tp->pci_clock_ctrl | newbits3, 40);
2196                 }
2197         }
2198
2199         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
2200             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2201             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2202                 tg3_power_down_phy(tp);
2203
2204         tg3_frob_aux_power(tp);
2205
2206         /* Workaround for unstable PLL clock */
2207         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2208             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2209                 u32 val = tr32(0x7d00);
2210
2211                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2212                 tw32(0x7d00, val);
2213                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2214                         int err;
2215
2216                         err = tg3_nvram_lock(tp);
2217                         tg3_halt_cpu(tp, RX_CPU_BASE);
2218                         if (!err)
2219                                 tg3_nvram_unlock(tp);
2220                 }
2221         }
2222
2223         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2224
2225         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2226                 pci_enable_wake(tp->pdev, state, true);
2227
2228         /* Finally, set the new power state. */
2229         pci_set_power_state(tp->pdev, state);
2230
2231         return 0;
2232 }
2233
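/* Decode the speed/duplex field of MII_TG3_AUX_STAT, e.g.
 * MII_TG3_AUX_STAT_100FULL -> SPEED_100 / DUPLEX_FULL.  The 5906 only
 * reports 10/100 and uses separate speed and duplex bits.
 */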
2234 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2235 {
2236         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2237         case MII_TG3_AUX_STAT_10HALF:
2238                 *speed = SPEED_10;
2239                 *duplex = DUPLEX_HALF;
2240                 break;
2241
2242         case MII_TG3_AUX_STAT_10FULL:
2243                 *speed = SPEED_10;
2244                 *duplex = DUPLEX_FULL;
2245                 break;
2246
2247         case MII_TG3_AUX_STAT_100HALF:
2248                 *speed = SPEED_100;
2249                 *duplex = DUPLEX_HALF;
2250                 break;
2251
2252         case MII_TG3_AUX_STAT_100FULL:
2253                 *speed = SPEED_100;
2254                 *duplex = DUPLEX_FULL;
2255                 break;
2256
2257         case MII_TG3_AUX_STAT_1000HALF:
2258                 *speed = SPEED_1000;
2259                 *duplex = DUPLEX_HALF;
2260                 break;
2261
2262         case MII_TG3_AUX_STAT_1000FULL:
2263                 *speed = SPEED_1000;
2264                 *duplex = DUPLEX_FULL;
2265                 break;
2266
2267         default:
2268                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2269                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2270                                  SPEED_10;
2271                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2272                                   DUPLEX_HALF;
2273                         break;
2274                 }
2275                 *speed = SPEED_INVALID;
2276                 *duplex = DUPLEX_INVALID;
2277                 break;
2278         }
2279 }
2280
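/* Program the PHY from link_config: trim the advertisements when entering
 * low-power mode, advertise the requested modes otherwise, and then either
 * force the BMCR speed/duplex or (re)start autonegotiation.
 */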
2281 static void tg3_phy_copper_begin(struct tg3 *tp)
2282 {
2283         u32 new_adv;
2284         int i;
2285
2286         if (tp->link_config.phy_is_low_power) {
2287                 /* Entering low power mode.  Disable gigabit and
2288                  * 100baseT advertisements.
2289                  */
2290                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2291
2292                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2293                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2294                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2295                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2296
2297                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2298         } else if (tp->link_config.speed == SPEED_INVALID) {
2299                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2300                         tp->link_config.advertising &=
2301                                 ~(ADVERTISED_1000baseT_Half |
2302                                   ADVERTISED_1000baseT_Full);
2303
2304                 new_adv = ADVERTISE_CSMA;
2305                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2306                         new_adv |= ADVERTISE_10HALF;
2307                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2308                         new_adv |= ADVERTISE_10FULL;
2309                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2310                         new_adv |= ADVERTISE_100HALF;
2311                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2312                         new_adv |= ADVERTISE_100FULL;
2313
2314                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2315
2316                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2317
2318                 if (tp->link_config.advertising &
2319                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2320                         new_adv = 0;
2321                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2322                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2323                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2324                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2325                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2326                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2327                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2328                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2329                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2330                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2331                 } else {
2332                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2333                 }
2334         } else {
2335                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2336                 new_adv |= ADVERTISE_CSMA;
2337
2338                 /* Asking for a specific link mode. */
2339                 if (tp->link_config.speed == SPEED_1000) {
2340                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2341
2342                         if (tp->link_config.duplex == DUPLEX_FULL)
2343                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2344                         else
2345                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2346                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2347                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2348                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2349                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2350                 } else {
2351                         if (tp->link_config.speed == SPEED_100) {
2352                                 if (tp->link_config.duplex == DUPLEX_FULL)
2353                                         new_adv |= ADVERTISE_100FULL;
2354                                 else
2355                                         new_adv |= ADVERTISE_100HALF;
2356                         } else {
2357                                 if (tp->link_config.duplex == DUPLEX_FULL)
2358                                         new_adv |= ADVERTISE_10FULL;
2359                                 else
2360                                         new_adv |= ADVERTISE_10HALF;
2361                         }
2362                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2363
2364                         new_adv = 0;
2365                 }
2366
2367                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2368         }
2369
2370         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2371             tp->link_config.speed != SPEED_INVALID) {
2372                 u32 bmcr, orig_bmcr;
2373
2374                 tp->link_config.active_speed = tp->link_config.speed;
2375                 tp->link_config.active_duplex = tp->link_config.duplex;
2376
2377                 bmcr = 0;
2378                 switch (tp->link_config.speed) {
2379                 default:
2380                 case SPEED_10:
2381                         break;
2382
2383                 case SPEED_100:
2384                         bmcr |= BMCR_SPEED100;
2385                         break;
2386
2387                 case SPEED_1000:
2388                         bmcr |= TG3_BMCR_SPEED1000;
2389                         break;
2390                 }
2391
2392                 if (tp->link_config.duplex == DUPLEX_FULL)
2393                         bmcr |= BMCR_FULLDPLX;
2394
2395                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2396                     (bmcr != orig_bmcr)) {
2397                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2398                         for (i = 0; i < 1500; i++) {
2399                                 u32 tmp;
2400
2401                                 udelay(10);
2402                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2403                                     tg3_readphy(tp, MII_BMSR, &tmp))
2404                                         continue;
2405                                 if (!(tmp & BMSR_LSTATUS)) {
2406                                         udelay(40);
2407                                         break;
2408                                 }
2409                         }
2410                         tg3_writephy(tp, MII_BMCR, bmcr);
2411                         udelay(40);
2412                 }
2413         } else {
2414                 tg3_writephy(tp, MII_BMCR,
2415                              BMCR_ANENABLE | BMCR_ANRESTART);
2416         }
2417 }
2418
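/* Load the BCM5401's DSP with its required magic register values;
 * returns nonzero if any of the PHY writes fail.
 */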
2419 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2420 {
2421         int err;
2422
2423         /* Turn off tap power management and set the
2424          * Extended packet length bit. */
2425         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2426
2427         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2428         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2429
2430         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2431         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2432
2433         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2434         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2435
2436         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2437         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2438
2439         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2440         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2441
2442         udelay(40);
2443
2444         return err;
2445 }
2446
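/* Return 1 only if every mode in @mask is currently present in the PHY's
 * advertisement registers (MII_ADVERTISE, plus MII_TG3_CTRL for the
 * gigabit rates on gigabit-capable parts).
 */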
2447 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2448 {
2449         u32 adv_reg, all_mask = 0;
2450
2451         if (mask & ADVERTISED_10baseT_Half)
2452                 all_mask |= ADVERTISE_10HALF;
2453         if (mask & ADVERTISED_10baseT_Full)
2454                 all_mask |= ADVERTISE_10FULL;
2455         if (mask & ADVERTISED_100baseT_Half)
2456                 all_mask |= ADVERTISE_100HALF;
2457         if (mask & ADVERTISED_100baseT_Full)
2458                 all_mask |= ADVERTISE_100FULL;
2459
2460         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2461                 return 0;
2462
2463         if ((adv_reg & all_mask) != all_mask)
2464                 return 0;
2465         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2466                 u32 tg3_ctrl;
2467
2468                 all_mask = 0;
2469                 if (mask & ADVERTISED_1000baseT_Half)
2470                         all_mask |= ADVERTISE_1000HALF;
2471                 if (mask & ADVERTISED_1000baseT_Full)
2472                         all_mask |= ADVERTISE_1000FULL;
2473
2474                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2475                         return 0;
2476
2477                 if ((tg3_ctrl & all_mask) != all_mask)
2478                         return 0;
2479         }
2480         return 1;
2481 }
2482
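/* Check that the advertised pause bits match what link_config.flowctrl
 * asks for.  On a full-duplex link a mismatch fails the check, while a
 * match also pulls the partner's advertisement into *rmtadv; on other
 * links a stale advertisement is simply reprogrammed for the next
 * negotiation.
 */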
2483 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2484 {
2485         u32 curadv, reqadv;
2486
2487         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2488                 return 1;
2489
2490         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2491         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2492
2493         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2494                 if (curadv != reqadv)
2495                         return 0;
2496
2497                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2498                         tg3_readphy(tp, MII_LPA, rmtadv);
2499         } else {
2500                 /* Reprogram the advertisement register, even if it
2501                  * does not affect the current link.  If the link
2502                  * gets renegotiated in the future, we can save an
2503                  * additional renegotiation cycle by advertising
2504                  * it correctly in the first place.
2505                  */
2506                 if (curadv != reqadv) {
2507                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2508                                      ADVERTISE_PAUSE_ASYM);
2509                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2510                 }
2511         }
2512
2513         return 1;
2514 }
2515
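/* Main copper link bring-up path: clear MAC and PHY interrupt state, apply
 * the per-chip PHY workarounds, wait for link, derive speed/duplex/flow
 * control from the negotiation results, and reprogram MAC_MODE and the
 * carrier state to match.
 */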
2516 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2517 {
2518         int current_link_up;
2519         u32 bmsr, dummy;
2520         u32 lcl_adv, rmt_adv;
2521         u16 current_speed;
2522         u8 current_duplex;
2523         int i, err;
2524
2525         tw32(MAC_EVENT, 0);
2526
2527         tw32_f(MAC_STATUS,
2528              (MAC_STATUS_SYNC_CHANGED |
2529               MAC_STATUS_CFG_CHANGED |
2530               MAC_STATUS_MI_COMPLETION |
2531               MAC_STATUS_LNKSTATE_CHANGED));
2532         udelay(40);
2533
2534         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2535                 tw32_f(MAC_MI_MODE,
2536                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2537                 udelay(80);
2538         }
2539
2540         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2541
2542         /* Some third-party PHYs need to be reset on link going
2543          * down.
2544          */
2545         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2546              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2547              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2548             netif_carrier_ok(tp->dev)) {
2549                 tg3_readphy(tp, MII_BMSR, &bmsr);
2550                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2551                     !(bmsr & BMSR_LSTATUS))
2552                         force_reset = 1;
2553         }
2554         if (force_reset)
2555                 tg3_phy_reset(tp);
2556
2557         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2558                 tg3_readphy(tp, MII_BMSR, &bmsr);
2559                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2560                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2561                         bmsr = 0;
2562
2563                 if (!(bmsr & BMSR_LSTATUS)) {
2564                         err = tg3_init_5401phy_dsp(tp);
2565                         if (err)
2566                                 return err;
2567
2568                         tg3_readphy(tp, MII_BMSR, &bmsr);
2569                         for (i = 0; i < 1000; i++) {
2570                                 udelay(10);
2571                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2572                                     (bmsr & BMSR_LSTATUS)) {
2573                                         udelay(40);
2574                                         break;
2575                                 }
2576                         }
2577
2578                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2579                             !(bmsr & BMSR_LSTATUS) &&
2580                             tp->link_config.active_speed == SPEED_1000) {
2581                                 err = tg3_phy_reset(tp);
2582                                 if (!err)
2583                                         err = tg3_init_5401phy_dsp(tp);
2584                                 if (err)
2585                                         return err;
2586                         }
2587                 }
2588         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2589                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2590                 /* 5701 {A0,B0} CRC bug workaround */
2591                 tg3_writephy(tp, 0x15, 0x0a75);
2592                 tg3_writephy(tp, 0x1c, 0x8c68);
2593                 tg3_writephy(tp, 0x1c, 0x8d68);
2594                 tg3_writephy(tp, 0x1c, 0x8c68);
2595         }
2596
2597         /* Clear pending interrupts... */
2598         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2599         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2600
2601         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2602                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2603         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2604                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2605
2606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2608                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2609                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2610                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2611                 else
2612                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2613         }
2614
2615         current_link_up = 0;
2616         current_speed = SPEED_INVALID;
2617         current_duplex = DUPLEX_INVALID;
2618
2619         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2620                 u32 val;
2621
2622                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2623                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2624                 if (!(val & (1 << 10))) {
2625                         val |= (1 << 10);
2626                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2627                         goto relink;
2628                 }
2629         }
2630
2631         bmsr = 0;
2632         for (i = 0; i < 100; i++) {
2633                 tg3_readphy(tp, MII_BMSR, &bmsr);
2634                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2635                     (bmsr & BMSR_LSTATUS))
2636                         break;
2637                 udelay(40);
2638         }
2639
2640         if (bmsr & BMSR_LSTATUS) {
2641                 u32 aux_stat, bmcr;
2642
2643                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2644                 for (i = 0; i < 2000; i++) {
2645                         udelay(10);
2646                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2647                             aux_stat)
2648                                 break;
2649                 }
2650
2651                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2652                                              &current_speed,
2653                                              &current_duplex);
2654
2655                 bmcr = 0;
2656                 for (i = 0; i < 200; i++) {
2657                         tg3_readphy(tp, MII_BMCR, &bmcr);
2658                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2659                                 continue;
2660                         if (bmcr && bmcr != 0x7fff)
2661                                 break;
2662                         udelay(10);
2663                 }
2664
2665                 lcl_adv = 0;
2666                 rmt_adv = 0;
2667
2668                 tp->link_config.active_speed = current_speed;
2669                 tp->link_config.active_duplex = current_duplex;
2670
2671                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2672                         if ((bmcr & BMCR_ANENABLE) &&
2673                             tg3_copper_is_advertising_all(tp,
2674                                                 tp->link_config.advertising)) {
2675                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2676                                                                   &rmt_adv))
2677                                         current_link_up = 1;
2678                         }
2679                 } else {
2680                         if (!(bmcr & BMCR_ANENABLE) &&
2681                             tp->link_config.speed == current_speed &&
2682                             tp->link_config.duplex == current_duplex &&
2683                             tp->link_config.flowctrl ==
2684                             tp->link_config.active_flowctrl) {
2685                                 current_link_up = 1;
2686                         }
2687                 }
2688
2689                 if (current_link_up == 1 &&
2690                     tp->link_config.active_duplex == DUPLEX_FULL)
2691                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2692         }
2693
2694 relink:
2695         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2696                 u32 tmp;
2697
2698                 tg3_phy_copper_begin(tp);
2699
2700                 tg3_readphy(tp, MII_BMSR, &tmp);
2701                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2702                     (tmp & BMSR_LSTATUS))
2703                         current_link_up = 1;
2704         }
2705
2706         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2707         if (current_link_up == 1) {
2708                 if (tp->link_config.active_speed == SPEED_100 ||
2709                     tp->link_config.active_speed == SPEED_10)
2710                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2711                 else
2712                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2713         } else
2714                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2715
2716         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2717         if (tp->link_config.active_duplex == DUPLEX_HALF)
2718                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2719
2720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2721                 if (current_link_up == 1 &&
2722                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2723                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2724                 else
2725                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2726         }
2727
2728         /* ??? Without this setting Netgear GA302T PHY does not
2729          * ??? send/receive packets...
2730          */
2731         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2732             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2733                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2734                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2735                 udelay(80);
2736         }
2737
2738         tw32_f(MAC_MODE, tp->mac_mode);
2739         udelay(40);
2740
2741         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2742                 /* Polled via timer. */
2743                 tw32_f(MAC_EVENT, 0);
2744         } else {
2745                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2746         }
2747         udelay(40);
2748
2749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2750             current_link_up == 1 &&
2751             tp->link_config.active_speed == SPEED_1000 &&
2752             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2753              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2754                 udelay(120);
2755                 tw32_f(MAC_STATUS,
2756                      (MAC_STATUS_SYNC_CHANGED |
2757                       MAC_STATUS_CFG_CHANGED));
2758                 udelay(40);
2759                 tg3_write_mem(tp,
2760                               NIC_SRAM_FIRMWARE_MBOX,
2761                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2762         }
2763
2764         if (current_link_up != netif_carrier_ok(tp->dev)) {
2765                 if (current_link_up)
2766                         netif_carrier_on(tp->dev);
2767                 else
2768                         netif_carrier_off(tp->dev);
2769                 tg3_link_report(tp);
2770         }
2771
2772         return 0;
2773 }
2774
2775 struct tg3_fiber_aneginfo {
2776         int state;
2777 #define ANEG_STATE_UNKNOWN              0
2778 #define ANEG_STATE_AN_ENABLE            1
2779 #define ANEG_STATE_RESTART_INIT         2
2780 #define ANEG_STATE_RESTART              3
2781 #define ANEG_STATE_DISABLE_LINK_OK      4
2782 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2783 #define ANEG_STATE_ABILITY_DETECT       6
2784 #define ANEG_STATE_ACK_DETECT_INIT      7
2785 #define ANEG_STATE_ACK_DETECT           8
2786 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2787 #define ANEG_STATE_COMPLETE_ACK         10
2788 #define ANEG_STATE_IDLE_DETECT_INIT     11
2789 #define ANEG_STATE_IDLE_DETECT          12
2790 #define ANEG_STATE_LINK_OK              13
2791 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2792 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2793
2794         u32 flags;
2795 #define MR_AN_ENABLE            0x00000001
2796 #define MR_RESTART_AN           0x00000002
2797 #define MR_AN_COMPLETE          0x00000004
2798 #define MR_PAGE_RX              0x00000008
2799 #define MR_NP_LOADED            0x00000010
2800 #define MR_TOGGLE_TX            0x00000020
2801 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2802 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2803 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2804 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2805 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2806 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2807 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2808 #define MR_TOGGLE_RX            0x00002000
2809 #define MR_NP_RX                0x00004000
2810
2811 #define MR_LINK_OK              0x80000000
2812
2813         unsigned long link_time, cur_time;
2814
2815         u32 ability_match_cfg;
2816         int ability_match_count;
2817
2818         char ability_match, idle_match, ack_match;
2819
2820         u32 txconfig, rxconfig;
2821 #define ANEG_CFG_NP             0x00000080
2822 #define ANEG_CFG_ACK            0x00000040
2823 #define ANEG_CFG_RF2            0x00000020
2824 #define ANEG_CFG_RF1            0x00000010
2825 #define ANEG_CFG_PS2            0x00000001
2826 #define ANEG_CFG_PS1            0x00008000
2827 #define ANEG_CFG_HD             0x00004000
2828 #define ANEG_CFG_FD             0x00002000
2829 #define ANEG_CFG_INVAL          0x00001f06
2830
2831 };
2832 #define ANEG_OK         0
2833 #define ANEG_DONE       1
2834 #define ANEG_TIMER_ENAB 2
2835 #define ANEG_FAILED     -1
2836
2837 #define ANEG_STATE_SETTLE_TIME  10000
2838
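/* Software autoneg state machine for the fiber (1000BASE-X) port.  The
 * states and MR_* flags above appear to mirror the IEEE 802.3 Clause 37
 * arbitration states and mr_* management variables.  The machine is
 * polled from fiber_autoneg() below with a ~1 usec delay per tick, so
 * ANEG_STATE_SETTLE_TIME (10000 ticks) corresponds to roughly 10 ms.
 */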
2839 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2840                                    struct tg3_fiber_aneginfo *ap)
2841 {
2842         u16 flowctrl;
2843         unsigned long delta;
2844         u32 rx_cfg_reg;
2845         int ret;
2846
2847         if (ap->state == ANEG_STATE_UNKNOWN) {
2848                 ap->rxconfig = 0;
2849                 ap->link_time = 0;
2850                 ap->cur_time = 0;
2851                 ap->ability_match_cfg = 0;
2852                 ap->ability_match_count = 0;
2853                 ap->ability_match = 0;
2854                 ap->idle_match = 0;
2855                 ap->ack_match = 0;
2856         }
2857         ap->cur_time++;
2858
2859         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2860                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2861
2862                 if (rx_cfg_reg != ap->ability_match_cfg) {
2863                         ap->ability_match_cfg = rx_cfg_reg;
2864                         ap->ability_match = 0;
2865                         ap->ability_match_count = 0;
2866                 } else {
2867                         if (++ap->ability_match_count > 1) {
2868                                 ap->ability_match = 1;
2869                                 ap->ability_match_cfg = rx_cfg_reg;
2870                         }
2871                 }
2872                 if (rx_cfg_reg & ANEG_CFG_ACK)
2873                         ap->ack_match = 1;
2874                 else
2875                         ap->ack_match = 0;
2876
2877                 ap->idle_match = 0;
2878         } else {
2879                 ap->idle_match = 1;
2880                 ap->ability_match_cfg = 0;
2881                 ap->ability_match_count = 0;
2882                 ap->ability_match = 0;
2883                 ap->ack_match = 0;
2884
2885                 rx_cfg_reg = 0;
2886         }
2887
2888         ap->rxconfig = rx_cfg_reg;
2889         ret = ANEG_OK;
2890
2891         switch(ap->state) {
2892         case ANEG_STATE_UNKNOWN:
2893                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2894                         ap->state = ANEG_STATE_AN_ENABLE;
2895
2896                 /* fallthru */
2897         case ANEG_STATE_AN_ENABLE:
2898                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2899                 if (ap->flags & MR_AN_ENABLE) {
2900                         ap->link_time = 0;
2901                         ap->cur_time = 0;
2902                         ap->ability_match_cfg = 0;
2903                         ap->ability_match_count = 0;
2904                         ap->ability_match = 0;
2905                         ap->idle_match = 0;
2906                         ap->ack_match = 0;
2907
2908                         ap->state = ANEG_STATE_RESTART_INIT;
2909                 } else {
2910                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2911                 }
2912                 break;
2913
2914         case ANEG_STATE_RESTART_INIT:
2915                 ap->link_time = ap->cur_time;
2916                 ap->flags &= ~(MR_NP_LOADED);
2917                 ap->txconfig = 0;
2918                 tw32(MAC_TX_AUTO_NEG, 0);
2919                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2920                 tw32_f(MAC_MODE, tp->mac_mode);
2921                 udelay(40);
2922
2923                 ret = ANEG_TIMER_ENAB;
2924                 ap->state = ANEG_STATE_RESTART;
2925
2926                 /* fallthru */
2927         case ANEG_STATE_RESTART:
2928                 delta = ap->cur_time - ap->link_time;
2929                 if (delta > ANEG_STATE_SETTLE_TIME) {
2930                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2931                 } else {
2932                         ret = ANEG_TIMER_ENAB;
2933                 }
2934                 break;
2935
2936         case ANEG_STATE_DISABLE_LINK_OK:
2937                 ret = ANEG_DONE;
2938                 break;
2939
2940         case ANEG_STATE_ABILITY_DETECT_INIT:
2941                 ap->flags &= ~(MR_TOGGLE_TX);
2942                 ap->txconfig = ANEG_CFG_FD;
2943                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2944                 if (flowctrl & ADVERTISE_1000XPAUSE)
2945                         ap->txconfig |= ANEG_CFG_PS1;
2946                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2947                         ap->txconfig |= ANEG_CFG_PS2;
2948                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2949                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2950                 tw32_f(MAC_MODE, tp->mac_mode);
2951                 udelay(40);
2952
2953                 ap->state = ANEG_STATE_ABILITY_DETECT;
2954                 break;
2955
2956         case ANEG_STATE_ABILITY_DETECT:
2957                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2958                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2959                 }
2960                 break;
2961
2962         case ANEG_STATE_ACK_DETECT_INIT:
2963                 ap->txconfig |= ANEG_CFG_ACK;
2964                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2965                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2966                 tw32_f(MAC_MODE, tp->mac_mode);
2967                 udelay(40);
2968
2969                 ap->state = ANEG_STATE_ACK_DETECT;
2970
2971                 /* fallthru */
2972         case ANEG_STATE_ACK_DETECT:
2973                 if (ap->ack_match != 0) {
2974                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2975                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2976                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2977                         } else {
2978                                 ap->state = ANEG_STATE_AN_ENABLE;
2979                         }
2980                 } else if (ap->ability_match != 0 &&
2981                            ap->rxconfig == 0) {
2982                         ap->state = ANEG_STATE_AN_ENABLE;
2983                 }
2984                 break;
2985
2986         case ANEG_STATE_COMPLETE_ACK_INIT:
2987                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2988                         ret = ANEG_FAILED;
2989                         break;
2990                 }
2991                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2992                                MR_LP_ADV_HALF_DUPLEX |
2993                                MR_LP_ADV_SYM_PAUSE |
2994                                MR_LP_ADV_ASYM_PAUSE |
2995                                MR_LP_ADV_REMOTE_FAULT1 |
2996                                MR_LP_ADV_REMOTE_FAULT2 |
2997                                MR_LP_ADV_NEXT_PAGE |
2998                                MR_TOGGLE_RX |
2999                                MR_NP_RX);
3000                 if (ap->rxconfig & ANEG_CFG_FD)
3001                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3002                 if (ap->rxconfig & ANEG_CFG_HD)
3003                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3004                 if (ap->rxconfig & ANEG_CFG_PS1)
3005                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3006                 if (ap->rxconfig & ANEG_CFG_PS2)
3007                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3008                 if (ap->rxconfig & ANEG_CFG_RF1)
3009                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3010                 if (ap->rxconfig & ANEG_CFG_RF2)
3011                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3012                 if (ap->rxconfig & ANEG_CFG_NP)
3013                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3014
3015                 ap->link_time = ap->cur_time;
3016
3017                 ap->flags ^= (MR_TOGGLE_TX);
3018                 if (ap->rxconfig & 0x0008)
3019                         ap->flags |= MR_TOGGLE_RX;
3020                 if (ap->rxconfig & ANEG_CFG_NP)
3021                         ap->flags |= MR_NP_RX;
3022                 ap->flags |= MR_PAGE_RX;
3023
3024                 ap->state = ANEG_STATE_COMPLETE_ACK;
3025                 ret = ANEG_TIMER_ENAB;
3026                 break;
3027
3028         case ANEG_STATE_COMPLETE_ACK:
3029                 if (ap->ability_match != 0 &&
3030                     ap->rxconfig == 0) {
3031                         ap->state = ANEG_STATE_AN_ENABLE;
3032                         break;
3033                 }
3034                 delta = ap->cur_time - ap->link_time;
3035                 if (delta > ANEG_STATE_SETTLE_TIME) {
3036                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3037                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3038                         } else {
3039                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3040                                     !(ap->flags & MR_NP_RX)) {
3041                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3042                                 } else {
3043                                         ret = ANEG_FAILED;
3044                                 }
3045                         }
3046                 }
3047                 break;
3048
3049         case ANEG_STATE_IDLE_DETECT_INIT:
3050                 ap->link_time = ap->cur_time;
3051                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3052                 tw32_f(MAC_MODE, tp->mac_mode);
3053                 udelay(40);
3054
3055                 ap->state = ANEG_STATE_IDLE_DETECT;
3056                 ret = ANEG_TIMER_ENAB;
3057                 break;
3058
3059         case ANEG_STATE_IDLE_DETECT:
3060                 if (ap->ability_match != 0 &&
3061                     ap->rxconfig == 0) {
3062                         ap->state = ANEG_STATE_AN_ENABLE;
3063                         break;
3064                 }
3065                 delta = ap->cur_time - ap->link_time;
3066                 if (delta > ANEG_STATE_SETTLE_TIME) {
3067                         /* XXX another gem from the Broadcom driver :( */
3068                         ap->state = ANEG_STATE_LINK_OK;
3069                 }
3070                 break;
3071
3072         case ANEG_STATE_LINK_OK:
3073                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3074                 ret = ANEG_DONE;
3075                 break;
3076
3077         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3078                 /* ??? unimplemented */
3079                 break;
3080
3081         case ANEG_STATE_NEXT_PAGE_WAIT:
3082                 /* ??? unimplemented */
3083                 break;
3084
3085         default:
3086                 ret = ANEG_FAILED;
3087                 break;
3088         }
3089
3090         return ret;
3091 }
3092
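/* Run the software autoneg state machine for at most ~195 ms (195000
 * iterations with a 1 usec delay each, ignoring loop overhead).  Returns
 * nonzero only if the machine finished (ANEG_DONE) and at least one of
 * autoneg-complete, link-OK or a full-duplex link partner was reported.
 */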
3093 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3094 {
3095         int res = 0;
3096         struct tg3_fiber_aneginfo aninfo;
3097         int status = ANEG_FAILED;
3098         unsigned int tick;
3099         u32 tmp;
3100
3101         tw32_f(MAC_TX_AUTO_NEG, 0);
3102
3103         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3104         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3105         udelay(40);
3106
3107         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3108         udelay(40);
3109
3110         memset(&aninfo, 0, sizeof(aninfo));
3111         aninfo.flags |= MR_AN_ENABLE;
3112         aninfo.state = ANEG_STATE_UNKNOWN;
3113         aninfo.cur_time = 0;
3114         tick = 0;
3115         while (++tick < 195000) {
3116                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3117                 if (status == ANEG_DONE || status == ANEG_FAILED)
3118                         break;
3119
3120                 udelay(1);
3121         }
3122
3123         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3124         tw32_f(MAC_MODE, tp->mac_mode);
3125         udelay(40);
3126
3127         *txflags = aninfo.txconfig;
3128         *rxflags = aninfo.flags;
3129
3130         if (status == ANEG_DONE &&
3131             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3132                              MR_LP_ADV_FULL_DUPLEX)))
3133                 res = 1;
3134
3135         return res;
3136 }
3137
3138 static void tg3_init_bcm8002(struct tg3 *tp)
3139 {
3140         u32 mac_status = tr32(MAC_STATUS);
3141         int i;
3142
3143         /* Reset when initializing for the first time, or when we have a link. */
3144         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3145             !(mac_status & MAC_STATUS_PCS_SYNCED))
3146                 return;
3147
3148         /* Set PLL lock range. */
3149         tg3_writephy(tp, 0x16, 0x8007);
3150
3151         /* SW reset */
3152         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3153
3154         /* Wait for reset to complete. */
3155         /* XXX schedule_timeout() ... */
3156         for (i = 0; i < 500; i++)
3157                 udelay(10);
3158
3159         /* Config mode; select PMA/Ch 1 regs. */
3160         tg3_writephy(tp, 0x10, 0x8411);
3161
3162         /* Enable auto-lock and comdet, select txclk for tx. */
3163         tg3_writephy(tp, 0x11, 0x0a10);
3164
3165         tg3_writephy(tp, 0x18, 0x00a0);
3166         tg3_writephy(tp, 0x16, 0x41ff);
3167
3168         /* Assert and deassert POR. */
3169         tg3_writephy(tp, 0x13, 0x0400);
3170         udelay(40);
3171         tg3_writephy(tp, 0x13, 0x0000);
3172
3173         tg3_writephy(tp, 0x11, 0x0a50);
3174         udelay(40);
3175         tg3_writephy(tp, 0x11, 0x0a10);
3176
3177         /* Wait for signal to stabilize */
3178         /* XXX schedule_timeout() ... */
3179         for (i = 0; i < 15000; i++)
3180                 udelay(10);
3181
3182         /* Deselect the channel register so we can read the PHYID
3183          * later.
3184          */
3185         tg3_writephy(tp, 0x10, 0x8011);
3186 }
3187
3188 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3189 {
3190         u16 flowctrl;
3191         u32 sg_dig_ctrl, sg_dig_status;
3192         u32 serdes_cfg, expected_sg_dig_ctrl;
3193         int workaround, port_a;
3194         int current_link_up;
3195
3196         serdes_cfg = 0;
3197         expected_sg_dig_ctrl = 0;
3198         workaround = 0;
3199         port_a = 1;
3200         current_link_up = 0;
3201
3202         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3203             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3204                 workaround = 1;
3205                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3206                         port_a = 0;
3207
3208                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3209                 /* preserve bits 20-23 for voltage regulator */
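                /* bits 0-11 = 0x00000fff, bits 13-14 = 0x00006000,
                 * bits 20-23 = 0x00f00000  =>  mask 0x00f06fff
                 */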
3210                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3211         }
3212
3213         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3214
3215         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3216                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3217                         if (workaround) {
3218                                 u32 val = serdes_cfg;
3219
3220                                 if (port_a)
3221                                         val |= 0xc010000;
3222                                 else
3223                                         val |= 0x4010000;
3224                                 tw32_f(MAC_SERDES_CFG, val);
3225                         }
3226
3227                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3228                 }
3229                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3230                         tg3_setup_flow_control(tp, 0, 0);
3231                         current_link_up = 1;
3232                 }
3233                 goto out;
3234         }
3235
3236         /* Want auto-negotiation.  */
3237         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3238
3239         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3240         if (flowctrl & ADVERTISE_1000XPAUSE)
3241                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3242         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3243                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3244
3245         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3246                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3247                     tp->serdes_counter &&
3248                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3249                                     MAC_STATUS_RCVD_CFG)) ==
3250                      MAC_STATUS_PCS_SYNCED)) {
3251                         tp->serdes_counter--;
3252                         current_link_up = 1;
3253                         goto out;
3254                 }
3255 restart_autoneg:
3256                 if (workaround)
3257                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3258                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3259                 udelay(5);
3260                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3261
3262                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3263                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3264         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3265                                  MAC_STATUS_SIGNAL_DET)) {
3266                 sg_dig_status = tr32(SG_DIG_STATUS);
3267                 mac_status = tr32(MAC_STATUS);
3268
3269                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3270                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3271                         u32 local_adv = 0, remote_adv = 0;
3272
3273                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3274                                 local_adv |= ADVERTISE_1000XPAUSE;
3275                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3276                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3277
3278                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3279                                 remote_adv |= LPA_1000XPAUSE;
3280                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3281                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3282
3283                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3284                         current_link_up = 1;
3285                         tp->serdes_counter = 0;
3286                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3287                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3288                         if (tp->serdes_counter)
3289                                 tp->serdes_counter--;
3290                         else {
3291                                 if (workaround) {
3292                                         u32 val = serdes_cfg;
3293
3294                                         if (port_a)
3295                                                 val |= 0xc010000;
3296                                         else
3297                                                 val |= 0x4010000;
3298
3299                                         tw32_f(MAC_SERDES_CFG, val);
3300                                 }
3301
3302                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3303                                 udelay(40);
3304
3305                                 /* Link parallel detection - link is up
3306                                  * only if we have PCS_SYNC and not
3307                                  * receiving config code words.  */
3308                                 mac_status = tr32(MAC_STATUS);
3309                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3310                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3311                                         tg3_setup_flow_control(tp, 0, 0);
3312                                         current_link_up = 1;
3313                                         tp->tg3_flags2 |=
3314                                                 TG3_FLG2_PARALLEL_DETECT;
3315                                         tp->serdes_counter =
3316                                                 SERDES_PARALLEL_DET_TIMEOUT;
3317                                 } else
3318                                         goto restart_autoneg;
3319                         }
3320                 }
3321         } else {
3322                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3323                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3324         }
3325
3326 out:
3327         return current_link_up;
3328 }
3329
3330 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3331 {
3332         int current_link_up = 0;
3333
3334         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3335                 goto out;
3336
3337         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3338                 u32 txflags, rxflags;
3339                 int i;
3340
3341                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3342                         u32 local_adv = 0, remote_adv = 0;
3343
3344                         if (txflags & ANEG_CFG_PS1)
3345                                 local_adv |= ADVERTISE_1000XPAUSE;
3346                         if (txflags & ANEG_CFG_PS2)
3347                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3348
3349                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3350                                 remote_adv |= LPA_1000XPAUSE;
3351                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3352                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3353
3354                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3355
3356                         current_link_up = 1;
3357                 }
3358                 for (i = 0; i < 30; i++) {
3359                         udelay(20);
3360                         tw32_f(MAC_STATUS,
3361                                (MAC_STATUS_SYNC_CHANGED |
3362                                 MAC_STATUS_CFG_CHANGED));
3363                         udelay(40);
3364                         if ((tr32(MAC_STATUS) &
3365                              (MAC_STATUS_SYNC_CHANGED |
3366                               MAC_STATUS_CFG_CHANGED)) == 0)
3367                                 break;
3368                 }
3369
3370                 mac_status = tr32(MAC_STATUS);
3371                 if (current_link_up == 0 &&
3372                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3373                     !(mac_status & MAC_STATUS_RCVD_CFG))
3374                         current_link_up = 1;
3375         } else {
3376                 tg3_setup_flow_control(tp, 0, 0);
3377
3378                 /* Forcing 1000FD link up. */
3379                 current_link_up = 1;
3380
3381                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3382                 udelay(40);
3383
3384                 tw32_f(MAC_MODE, tp->mac_mode);
3385                 udelay(40);
3386         }
3387
3388 out:
3389         return current_link_up;
3390 }
3391
3392 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3393 {
3394         u32 orig_pause_cfg;
3395         u16 orig_active_speed;
3396         u8 orig_active_duplex;
3397         u32 mac_status;
3398         int current_link_up;
3399         int i;
3400
3401         orig_pause_cfg = tp->link_config.active_flowctrl;
3402         orig_active_speed = tp->link_config.active_speed;
3403         orig_active_duplex = tp->link_config.active_duplex;
3404
3405         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3406             netif_carrier_ok(tp->dev) &&
3407             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3408                 mac_status = tr32(MAC_STATUS);
3409                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3410                                MAC_STATUS_SIGNAL_DET |
3411                                MAC_STATUS_CFG_CHANGED |
3412                                MAC_STATUS_RCVD_CFG);
3413                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3414                                    MAC_STATUS_SIGNAL_DET)) {
3415                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3416                                             MAC_STATUS_CFG_CHANGED));
3417                         return 0;
3418                 }
3419         }
3420
3421         tw32_f(MAC_TX_AUTO_NEG, 0);
3422
3423         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3424         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3425         tw32_f(MAC_MODE, tp->mac_mode);
3426         udelay(40);
3427
3428         if (tp->phy_id == PHY_ID_BCM8002)
3429                 tg3_init_bcm8002(tp);
3430
3431         /* Enable link change events even when polling the serdes.  */
3432         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3433         udelay(40);
3434
3435         current_link_up = 0;
3436         mac_status = tr32(MAC_STATUS);
3437
3438         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3439                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3440         else
3441                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3442
3443         tp->hw_status->status =
3444                 (SD_STATUS_UPDATED |
3445                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3446
3447         for (i = 0; i < 100; i++) {
3448                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3449                                     MAC_STATUS_CFG_CHANGED));
3450                 udelay(5);
3451                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3452                                          MAC_STATUS_CFG_CHANGED |
3453                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3454                         break;
3455         }
3456
3457         mac_status = tr32(MAC_STATUS);
3458         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3459                 current_link_up = 0;
3460                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3461                     tp->serdes_counter == 0) {
3462                         tw32_f(MAC_MODE, (tp->mac_mode |
3463                                           MAC_MODE_SEND_CONFIGS));
3464                         udelay(1);
3465                         tw32_f(MAC_MODE, tp->mac_mode);
3466                 }
3467         }
3468
3469         if (current_link_up == 1) {
3470                 tp->link_config.active_speed = SPEED_1000;
3471                 tp->link_config.active_duplex = DUPLEX_FULL;
3472                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3473                                     LED_CTRL_LNKLED_OVERRIDE |
3474                                     LED_CTRL_1000MBPS_ON));
3475         } else {
3476                 tp->link_config.active_speed = SPEED_INVALID;
3477                 tp->link_config.active_duplex = DUPLEX_INVALID;
3478                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3479                                     LED_CTRL_LNKLED_OVERRIDE |
3480                                     LED_CTRL_TRAFFIC_OVERRIDE));
3481         }
3482
3483         if (current_link_up != netif_carrier_ok(tp->dev)) {
3484                 if (current_link_up)
3485                         netif_carrier_on(tp->dev);
3486                 else
3487                         netif_carrier_off(tp->dev);
3488                 tg3_link_report(tp);
3489         } else {
3490                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3491                 if (orig_pause_cfg != now_pause_cfg ||
3492                     orig_active_speed != tp->link_config.active_speed ||
3493                     orig_active_duplex != tp->link_config.active_duplex)
3494                         tg3_link_report(tp);
3495         }
3496
3497         return 0;
3498 }
3499
3500 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3501 {
3502         int current_link_up, err = 0;
3503         u32 bmsr, bmcr;
3504         u16 current_speed;
3505         u8 current_duplex;
3506         u32 local_adv, remote_adv;
3507
3508         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3509         tw32_f(MAC_MODE, tp->mac_mode);
3510         udelay(40);
3511
3512         tw32(MAC_EVENT, 0);
3513
3514         tw32_f(MAC_STATUS,
3515              (MAC_STATUS_SYNC_CHANGED |
3516               MAC_STATUS_CFG_CHANGED |
3517               MAC_STATUS_MI_COMPLETION |
3518               MAC_STATUS_LNKSTATE_CHANGED));
3519         udelay(40);
3520
3521         if (force_reset)
3522                 tg3_phy_reset(tp);
3523
3524         current_link_up = 0;
3525         current_speed = SPEED_INVALID;
3526         current_duplex = DUPLEX_INVALID;
3527
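        /* MII link status is latched-low; read BMSR twice so the second
         * read reflects the current link state.
         */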
3528         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3529         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3531                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3532                         bmsr |= BMSR_LSTATUS;
3533                 else
3534                         bmsr &= ~BMSR_LSTATUS;
3535         }
3536
3537         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3538
3539         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3540             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3541                 /* do nothing, just check for link up at the end */
3542         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3543                 u32 adv, new_adv;
3544
3545                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3546                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3547                                   ADVERTISE_1000XPAUSE |
3548                                   ADVERTISE_1000XPSE_ASYM |
3549                                   ADVERTISE_SLCT);
3550
3551                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3552
3553                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3554                         new_adv |= ADVERTISE_1000XHALF;
3555                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3556                         new_adv |= ADVERTISE_1000XFULL;
3557
3558                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3559                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3560                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3561                         tg3_writephy(tp, MII_BMCR, bmcr);
3562
3563                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3564                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3565                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3566
3567                         return err;
3568                 }
3569         } else {
3570                 u32 new_bmcr;
3571
3572                 bmcr &= ~BMCR_SPEED1000;
3573                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3574
3575                 if (tp->link_config.duplex == DUPLEX_FULL)
3576                         new_bmcr |= BMCR_FULLDPLX;
3577
3578                 if (new_bmcr != bmcr) {
3579                         /* BMCR_SPEED1000 is a reserved bit that needs
3580                          * to be set on write.
3581                          */
3582                         new_bmcr |= BMCR_SPEED1000;
3583
3584                         /* Force a linkdown */
3585                         if (netif_carrier_ok(tp->dev)) {
3586                                 u32 adv;
3587
3588                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3589                                 adv &= ~(ADVERTISE_1000XFULL |
3590                                          ADVERTISE_1000XHALF |
3591                                          ADVERTISE_SLCT);
3592                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3593                                 tg3_writephy(tp, MII_BMCR, bmcr |
3594                                                            BMCR_ANRESTART |
3595                                                            BMCR_ANENABLE);
3596                                 udelay(10);
3597                                 netif_carrier_off(tp->dev);
3598                         }
3599                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3600                         bmcr = new_bmcr;
3601                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3602                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3603                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3604                             ASIC_REV_5714) {
3605                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3606                                         bmsr |= BMSR_LSTATUS;
3607                                 else
3608                                         bmsr &= ~BMSR_LSTATUS;
3609                         }
3610                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3611                 }
3612         }
3613
3614         if (bmsr & BMSR_LSTATUS) {
3615                 current_speed = SPEED_1000;
3616                 current_link_up = 1;
3617                 if (bmcr & BMCR_FULLDPLX)
3618                         current_duplex = DUPLEX_FULL;
3619                 else
3620                         current_duplex = DUPLEX_HALF;
3621
3622                 local_adv = 0;
3623                 remote_adv = 0;
3624
3625                 if (bmcr & BMCR_ANENABLE) {
3626                         u32 common;
3627
3628                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3629                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3630                         common = local_adv & remote_adv;
3631                         if (common & (ADVERTISE_1000XHALF |
3632                                       ADVERTISE_1000XFULL)) {
3633                                 if (common & ADVERTISE_1000XFULL)
3634                                         current_duplex = DUPLEX_FULL;
3635                                 else
3636                                         current_duplex = DUPLEX_HALF;
3637                         }
3638                         else
3639                                 current_link_up = 0;
3640                 }
3641         }
3642
3643         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3644                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3645
3646         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3647         if (tp->link_config.active_duplex == DUPLEX_HALF)
3648                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3649
3650         tw32_f(MAC_MODE, tp->mac_mode);
3651         udelay(40);
3652
3653         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3654
3655         tp->link_config.active_speed = current_speed;
3656         tp->link_config.active_duplex = current_duplex;
3657
3658         if (current_link_up != netif_carrier_ok(tp->dev)) {
3659                 if (current_link_up)
3660                         netif_carrier_on(tp->dev);
3661                 else {
3662                         netif_carrier_off(tp->dev);
3663                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3664                 }
3665                 tg3_link_report(tp);
3666         }
3667         return err;
3668 }
3669
3670 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3671 {
3672         if (tp->serdes_counter) {
3673                 /* Give autoneg time to complete. */
3674                 tp->serdes_counter--;
3675                 return;
3676         }
3677         if (!netif_carrier_ok(tp->dev) &&
3678             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3679                 u32 bmcr;
3680
3681                 tg3_readphy(tp, MII_BMCR, &bmcr);
3682                 if (bmcr & BMCR_ANENABLE) {
3683                         u32 phy1, phy2;
3684
3685                         /* Select shadow register 0x1f */
3686                         tg3_writephy(tp, 0x1c, 0x7c00);
3687                         tg3_readphy(tp, 0x1c, &phy1);
3688
3689                         /* Select expansion interrupt status register */
3690                         tg3_writephy(tp, 0x17, 0x0f01);
3691                         tg3_readphy(tp, 0x15, &phy2);
3692                         tg3_readphy(tp, 0x15, &phy2);
3693
3694                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3695                                 /* We have signal detect and are not receiving
3696                                  * config code words, so the link is up by
3697                                  * parallel detection.
3698                                  */
3699
3700                                 bmcr &= ~BMCR_ANENABLE;
3701                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3702                                 tg3_writephy(tp, MII_BMCR, bmcr);
3703                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3704                         }
3705                 }
3706         }
3707         else if (netif_carrier_ok(tp->dev) &&
3708                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3709                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3710                 u32 phy2;
3711
3712                 /* Select expansion interrupt status register */
3713                 tg3_writephy(tp, 0x17, 0x0f01);
3714                 tg3_readphy(tp, 0x15, &phy2);
3715                 if (phy2 & 0x20) {
3716                         u32 bmcr;
3717
3718                         /* Config code words received, turn on autoneg. */
3719                         tg3_readphy(tp, MII_BMCR, &bmcr);
3720                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3721
3722                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3723
3724                 }
3725         }
3726 }
3727
3728 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3729 {
3730         int err;
3731
3732         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3733                 err = tg3_setup_fiber_phy(tp, force_reset);
3734         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3735                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3736         } else {
3737                 err = tg3_setup_copper_phy(tp, force_reset);
3738         }
3739
3740         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3741             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3742                 u32 val, scale;
3743
3744                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3745                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3746                         scale = 65;
3747                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3748                         scale = 6;
3749                 else
3750                         scale = 12;
3751
3752                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3753                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3754                 tw32(GRC_MISC_CFG, val);
3755         }
3756
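        /* Half-duplex gigabit uses a much larger slot time (4096 bit times,
         * for carrier extension) than 10/100, which is presumably why
         * SLOT_TIME is programmed to 0xff here instead of 32; the register
         * units themselves are hardware-defined.
         */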
3757         if (tp->link_config.active_speed == SPEED_1000 &&
3758             tp->link_config.active_duplex == DUPLEX_HALF)
3759                 tw32(MAC_TX_LENGTHS,
3760                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3761                       (6 << TX_LENGTHS_IPG_SHIFT) |
3762                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3763         else
3764                 tw32(MAC_TX_LENGTHS,
3765                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3766                       (6 << TX_LENGTHS_IPG_SHIFT) |
3767                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3768
3769         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3770                 if (netif_carrier_ok(tp->dev)) {
3771                         tw32(HOSTCC_STAT_COAL_TICKS,
3772                              tp->coal.stats_block_coalesce_usecs);
3773                 } else {
3774                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3775                 }
3776         }
3777
3778         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3779                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3780                 if (!netif_carrier_ok(tp->dev))
3781                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3782                               tp->pwrmgmt_thresh;
3783                 else
3784                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3785                 tw32(PCIE_PWR_MGMT_THRESH, val);
3786         }
3787
3788         return err;
3789 }
3790
3791 /* This is called whenever we suspect that the system chipset is re-
3792  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3793  * is bogus tx completions. We try to recover by setting the
3794  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3795  * in the workqueue.
3796  */
3797 static void tg3_tx_recover(struct tg3 *tp)
3798 {
3799         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3800                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3801
3802         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3803                "mapped I/O cycles to the network device, attempting to "
3804                "recover. Please report the problem to the driver maintainer "
3805                "and include system chipset information.\n", tp->dev->name);
3806
3807         spin_lock(&tp->lock);
3808         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3809         spin_unlock(&tp->lock);
3810 }
3811
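/* Number of free TX descriptors: tx_pending minus the descriptors
 * currently in flight.  (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1)
 * handles index wrap-around, which works because the ring size is a
 * power of two.
 */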
3812 static inline u32 tg3_tx_avail(struct tg3 *tp)
3813 {
3814         smp_mb();
3815         return (tp->tx_pending -
3816                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3817 }
3818
3819 /* Tigon3 never reports partial packet sends.  So we do not
3820  * need special logic to handle SKBs that have not had all
3821  * of their frags sent yet, like SunGEM does.
3822  */
3823 static void tg3_tx(struct tg3 *tp)
3824 {
3825         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3826         u32 sw_idx = tp->tx_cons;
3827
3828         while (sw_idx != hw_idx) {
3829                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3830                 struct sk_buff *skb = ri->skb;
3831                 int i, tx_bug = 0;
3832
3833                 if (unlikely(skb == NULL)) {
3834                         tg3_tx_recover(tp);
3835                         return;
3836                 }
3837
3838                 pci_unmap_single(tp->pdev,
3839                                  pci_unmap_addr(ri, mapping),
3840                                  skb_headlen(skb),
3841                                  PCI_DMA_TODEVICE);
3842
3843                 ri->skb = NULL;
3844
3845                 sw_idx = NEXT_TX(sw_idx);
3846
3847                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3848                         ri = &tp->tx_buffers[sw_idx];
3849                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3850                                 tx_bug = 1;
3851
3852                         pci_unmap_page(tp->pdev,
3853                                        pci_unmap_addr(ri, mapping),
3854                                        skb_shinfo(skb)->frags[i].size,
3855                                        PCI_DMA_TODEVICE);
3856
3857                         sw_idx = NEXT_TX(sw_idx);
3858                 }
3859
3860                 dev_kfree_skb(skb);
3861
3862                 if (unlikely(tx_bug)) {
3863                         tg3_tx_recover(tp);
3864                         return;
3865                 }
3866         }
3867
3868         tp->tx_cons = sw_idx;
3869
3870         /* Need to make the tx_cons update visible to tg3_start_xmit()
3871          * before checking for netif_queue_stopped().  Without the
3872          * memory barrier, there is a small possibility that tg3_start_xmit()
3873          * will miss it and cause the queue to be stopped forever.
3874          */
3875         smp_mb();
3876
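        /* Check cheaply without the lock first, then re-check under
         * netif_tx_lock so we do not race with tg3_start_xmit() stopping
         * the queue: wake it only if it is still stopped and enough
         * descriptors are free.
         */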
3877         if (unlikely(netif_queue_stopped(tp->dev) &&
3878                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3879                 netif_tx_lock(tp->dev);
3880                 if (netif_queue_stopped(tp->dev) &&
3881                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3882                         netif_wake_queue(tp->dev);
3883                 netif_tx_unlock(tp->dev);
3884         }
3885 }
3886
3887 /* Returns size of skb allocated or < 0 on error.
3888  *
3889  * We only need to fill in the address because the other members
3890  * of the RX descriptor are invariant, see tg3_init_rings.
3891  *
3892  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3893  * posting buffers we only dirty the first cache line of the RX
3894  * descriptor (containing the address).  Whereas for the RX status
3895  * buffers the cpu only reads the last cacheline of the RX descriptor
3896  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3897  */
3898 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3899                             int src_idx, u32 dest_idx_unmasked)
3900 {
3901         struct tg3_rx_buffer_desc *desc;
3902         struct ring_info *map, *src_map;
3903         struct sk_buff *skb;
3904         dma_addr_t mapping;
3905         int skb_size, dest_idx;
3906
3907         src_map = NULL;
3908         switch (opaque_key) {
3909         case RXD_OPAQUE_RING_STD:
3910                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3911                 desc = &tp->rx_std[dest_idx];
3912                 map = &tp->rx_std_buffers[dest_idx];
3913                 if (src_idx >= 0)
3914                         src_map = &tp->rx_std_buffers[src_idx];
3915                 skb_size = tp->rx_pkt_buf_sz;
3916                 break;
3917
3918         case RXD_OPAQUE_RING_JUMBO:
3919                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3920                 desc = &tp->rx_jumbo[dest_idx];
3921                 map = &tp->rx_jumbo_buffers[dest_idx];
3922                 if (src_idx >= 0)
3923                         src_map = &tp->rx_jumbo_buffers[src_idx];
3924                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3925                 break;
3926
3927         default:
3928                 return -EINVAL;
3929         }
3930
3931         /* Do not overwrite any of the map or rp information
3932          * until we are sure we can commit to a new buffer.
3933          *
3934          * Callers depend upon this behavior and assume that
3935          * we leave everything unchanged if we fail.
3936          */
3937         skb = netdev_alloc_skb(tp->dev, skb_size);
3938         if (skb == NULL)
3939                 return -ENOMEM;
3940
3941         skb_reserve(skb, tp->rx_offset);
3942
3943         mapping = pci_map_single(tp->pdev, skb->data,
3944                                  skb_size - tp->rx_offset,
3945                                  PCI_DMA_FROMDEVICE);
3946
3947         map->skb = skb;
3948         pci_unmap_addr_set(map, mapping, mapping);
3949
3950         if (src_map != NULL)
3951                 src_map->skb = NULL;
3952
3953         desc->addr_hi = ((u64)mapping >> 32);
3954         desc->addr_lo = ((u64)mapping & 0xffffffff);
3955
3956         return skb_size;
3957 }
3958
3959 /* We only need to move over in the address because the other
3960  * members of the RX descriptor are invariant.  See notes above
3961  * tg3_alloc_rx_skb for full details.
3962  */
3963 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3964                            int src_idx, u32 dest_idx_unmasked)
3965 {
3966         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3967         struct ring_info *src_map, *dest_map;
3968         int dest_idx;
3969
3970         switch (opaque_key) {
3971         case RXD_OPAQUE_RING_STD:
3972                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3973                 dest_desc = &tp->rx_std[dest_idx];
3974                 dest_map = &tp->rx_std_buffers[dest_idx];
3975                 src_desc = &tp->rx_std[src_idx];
3976                 src_map = &tp->rx_std_buffers[src_idx];
3977                 break;
3978
3979         case RXD_OPAQUE_RING_JUMBO:
3980                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3981                 dest_desc = &tp->rx_jumbo[dest_idx];
3982                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3983                 src_desc = &tp->rx_jumbo[src_idx];
3984                 src_map = &tp->rx_jumbo_buffers[src_idx];
3985                 break;
3986
3987         default:
3988                 return;
3989         }
3990
3991         dest_map->skb = src_map->skb;
3992         pci_unmap_addr_set(dest_map, mapping,
3993                            pci_unmap_addr(src_map, mapping));
3994         dest_desc->addr_hi = src_desc->addr_hi;
3995         dest_desc->addr_lo = src_desc->addr_lo;
3996
3997         src_map->skb = NULL;
3998 }
3999
4000 #if TG3_VLAN_TAG_USED
4001 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4002 {
4003         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4004 }
4005 #endif
4006
4007 /* The RX ring scheme is composed of multiple rings which post fresh
4008  * buffers to the chip, and one special ring the chip uses to report
4009  * status back to the host.
4010  *
4011  * The special ring reports the status of received packets to the
4012  * host.  The chip does not write into the original descriptor the
4013  * RX buffer was obtained from.  The chip simply takes the original
4014  * descriptor as provided by the host, updates the status and length
4015  * field, then writes this into the next status ring entry.
4016  *
4017  * Each ring the host uses to post buffers to the chip is described
4018  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4019  * it is first placed into the on-chip ram.  When the packet's length
4020  * is known, it walks down the TG3_BDINFO entries to select the ring.
4021  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4022  * whose MAXLEN covers the new packet's length is chosen.
4023  *
4024  * The "separate ring for rx status" scheme may sound queer, but it makes
4025  * sense from a cache coherency perspective.  If only the host writes
4026  * to the buffer post rings, and only the chip writes to the rx status
4027  * rings, then cache lines never move beyond shared-modified state.
4028  * If both the host and chip were to write into the same ring, cache line
4029  * eviction could occur since both entities want it in an exclusive state.
4030  */
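/* In tg3_rx() below this works out to: read the producer index from the
 * status block (hw_status->idx[0].rx_producer), walk the return ring
 * rx_rcb[] up to it, recycle or replace the posted buffers, then write
 * our consumer index back via MAILBOX_RCVRET_CON_IDX_0 and the refreshed
 * producer indices via the RCV_*_PROD_IDX mailboxes.
 */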
4031 static int tg3_rx(struct tg3 *tp, int budget)
4032 {
4033         u32 work_mask, rx_std_posted = 0;
4034         u32 sw_idx = tp->rx_rcb_ptr;
4035         u16 hw_idx;
4036         int received;
4037
4038         hw_idx = tp->hw_status->idx[0].rx_producer;
4039         /*
4040          * We need to order the read of hw_idx and the read of
4041          * the opaque cookie.
4042          */
4043         rmb();
4044         work_mask = 0;
4045         received = 0;
4046         while (sw_idx != hw_idx && budget > 0) {
4047                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4048                 unsigned int len;
4049                 struct sk_buff *skb;
4050                 dma_addr_t dma_addr;
4051                 u32 opaque_key, desc_idx, *post_ptr;
4052
4053                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4054                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4055                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4056                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4057                                                   mapping);
4058                         skb = tp->rx_std_buffers[desc_idx].skb;
4059                         post_ptr = &tp->rx_std_ptr;
4060                         rx_std_posted++;
4061                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4062                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4063                                                   mapping);
4064                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4065                         post_ptr = &tp->rx_jumbo_ptr;
4066                 }
4067                 else {
4068                         goto next_pkt_nopost;
4069                 }
4070
4071                 work_mask |= opaque_key;
4072
4073                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4074                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4075                 drop_it:
4076                         tg3_recycle_rx(tp, opaque_key,
4077                                        desc_idx, *post_ptr);
4078                 drop_it_no_recycle:
4079                         /* Other statistics kept track of by card. */
4080                         tp->net_stats.rx_dropped++;
4081                         goto next_pkt;
4082                 }
4083
4084                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4085
4086                 if (len > RX_COPY_THRESHOLD
4087                         && tp->rx_offset == 2
4088                         /* rx_offset != 2 iff this is a 5701 card running
4089                          * in PCI-X mode [see tg3_get_invariants()] */
4090                 ) {
4091                         int skb_size;
4092
4093                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4094                                                     desc_idx, *post_ptr);
4095                         if (skb_size < 0)
4096                                 goto drop_it;
4097
4098                         pci_unmap_single(tp->pdev, dma_addr,
4099                                          skb_size - tp->rx_offset,
4100                                          PCI_DMA_FROMDEVICE);
4101
4102                         skb_put(skb, len);
4103                 } else {
4104                         struct sk_buff *copy_skb;
4105
4106                         tg3_recycle_rx(tp, opaque_key,
4107                                        desc_idx, *post_ptr);
4108
4109                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4110                         if (copy_skb == NULL)
4111                                 goto drop_it_no_recycle;
4112
4113                         skb_reserve(copy_skb, 2);
4114                         skb_put(copy_skb, len);
4115                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4116                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4117                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4118
4119                         /* We'll reuse the original ring buffer. */
4120                         skb = copy_skb;
4121                 }
4122
4123                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4124                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4125                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4126                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4127                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4128                 else
4129                         skb->ip_summed = CHECKSUM_NONE;
4130
4131                 skb->protocol = eth_type_trans(skb, tp->dev);
4132 #if TG3_VLAN_TAG_USED
4133                 if (tp->vlgrp != NULL &&
4134                     desc->type_flags & RXD_FLAG_VLAN) {
4135                         tg3_vlan_rx(tp, skb,
4136                                     desc->err_vlan & RXD_VLAN_MASK);
4137                 } else
4138 #endif
4139                         netif_receive_skb(skb);
4140
4141                 tp->dev->last_rx = jiffies;
4142                 received++;
4143                 budget--;
4144
4145 next_pkt:
4146                 (*post_ptr)++;
4147
4148                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4149                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4150
4151                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4152                                      TG3_64BIT_REG_LOW, idx);
4153                         work_mask &= ~RXD_OPAQUE_RING_STD;
4154                         rx_std_posted = 0;
4155                 }
4156 next_pkt_nopost:
4157                 sw_idx++;
4158                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4159
4160                 /* Refresh hw_idx to see if there is new work */
4161                 if (sw_idx == hw_idx) {
4162                         hw_idx = tp->hw_status->idx[0].rx_producer;
4163                         rmb();
4164                 }
4165         }
4166
4167         /* ACK the status ring. */
4168         tp->rx_rcb_ptr = sw_idx;
4169         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4170
4171         /* Refill RX ring(s). */
4172         if (work_mask & RXD_OPAQUE_RING_STD) {
4173                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4174                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4175                              sw_idx);
4176         }
4177         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4178                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4179                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4180                              sw_idx);
4181         }
4182         mmiowb();
4183
4184         return received;
4185 }
4186
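/* Editor's note: the sketch below is an illustrative addition, not part
 * of the driver.  tg3_rx() above consumes the rx return ring by walking
 * a software index toward the hardware-written producer index and
 * wrapping with "& (ring size - 1)"; that mask is equivalent to a
 * modulo only because the ring sizes are powers of two.  The helper
 * name is hypothetical.
 */
static inline u32 example_ring_next(u32 idx, u32 ring_size)
{
        /* ring_size must be a power of two for the mask to act as modulo */
        return (idx + 1) & (ring_size - 1);
}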
4187 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4188 {
4189         struct tg3_hw_status *sblk = tp->hw_status;
4190
4191         /* handle link change and other phy events */
4192         if (!(tp->tg3_flags &
4193               (TG3_FLAG_USE_LINKCHG_REG |
4194                TG3_FLAG_POLL_SERDES))) {
4195                 if (sblk->status & SD_STATUS_LINK_CHG) {
4196                         sblk->status = SD_STATUS_UPDATED |
4197                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4198                         spin_lock(&tp->lock);
4199                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4200                                 tw32_f(MAC_STATUS,
4201                                      (MAC_STATUS_SYNC_CHANGED |
4202                                       MAC_STATUS_CFG_CHANGED |
4203                                       MAC_STATUS_MI_COMPLETION |
4204                                       MAC_STATUS_LNKSTATE_CHANGED));
4205                                 udelay(40);
4206                         } else
4207                                 tg3_setup_phy(tp, 0);
4208                         spin_unlock(&tp->lock);
4209                 }
4210         }
4211
4212         /* run TX completion thread */
4213         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4214                 tg3_tx(tp);
4215                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4216                         return work_done;
4217         }
4218
4219         /* run RX thread, within the bounds set by NAPI.
4220          * All RX "locking" is done by ensuring outside
4221          * code synchronizes with tg3->napi.poll()
4222          */
4223         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4224                 work_done += tg3_rx(tp, budget - work_done);
4225
4226         return work_done;
4227 }
4228
4229 static int tg3_poll(struct napi_struct *napi, int budget)
4230 {
4231         struct tg3 *tp = container_of(napi, struct tg3, napi);
4232         int work_done = 0;
4233         struct tg3_hw_status *sblk = tp->hw_status;
4234
4235         while (1) {
4236                 work_done = tg3_poll_work(tp, work_done, budget);
4237
4238                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4239                         goto tx_recovery;
4240
4241                 if (unlikely(work_done >= budget))
4242                         break;
4243
4244                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4245                         /* tp->last_tag is used in tg3_restart_ints() below
4246                          * to tell the hw how much work has been processed,
4247                          * so we must read it before checking for more work.
4248                          */
4249                         tp->last_tag = sblk->status_tag;
4250                         rmb();
4251                 } else
4252                         sblk->status &= ~SD_STATUS_UPDATED;
4253
4254                 if (likely(!tg3_has_work(tp))) {
4255                         netif_rx_complete(tp->dev, napi);
4256                         tg3_restart_ints(tp);
4257                         break;
4258                 }
4259         }
4260
4261         return work_done;
4262
4263 tx_recovery:
4264         /* work_done is guaranteed to be less than budget. */
4265         netif_rx_complete(tp->dev, napi);
4266         schedule_work(&tp->reset_task);
4267         return work_done;
4268 }
4269
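/* Editor's note: illustrative addition, not part of the driver.
 * tg3_poll() above follows the NAPI contract: each pass hands tg3_rx()
 * only the still-unspent part of the budget, and the loop stops once
 * work_done reaches the budget, leaving the rest for the next poll.
 * A minimal model of that accounting (hypothetical helper):
 */
static int example_napi_budget(int budget, const int *work_per_pass, int passes)
{
        int i, work_done = 0;

        for (i = 0; i < passes && work_done < budget; i++) {
                int quota = budget - work_done; /* what this pass may consume */
                int done = work_per_pass[i];

                work_done += (done < quota) ? done : quota;
        }

        return work_done;       /* never exceeds budget */
}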
4270 static void tg3_irq_quiesce(struct tg3 *tp)
4271 {
4272         BUG_ON(tp->irq_sync);
4273
4274         tp->irq_sync = 1;
4275         smp_mb();
4276
4277         synchronize_irq(tp->pdev->irq);
4278 }
4279
4280 static inline int tg3_irq_sync(struct tg3 *tp)
4281 {
4282         return tp->irq_sync;
4283 }
4284
4285 /* Fully shut down all tg3 driver activity elsewhere in the system.
4286  * If irq_sync is non-zero, then we must also synchronize with the
4287  * IRQ handler.  Most of the time, this is not necessary except when
4288  * shutting down the device.
4289  */
4290 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4291 {
4292         spin_lock_bh(&tp->lock);
4293         if (irq_sync)
4294                 tg3_irq_quiesce(tp);
4295 }
4296
4297 static inline void tg3_full_unlock(struct tg3 *tp)
4298 {
4299         spin_unlock_bh(&tp->lock);
4300 }
4301
4302 /* One-shot MSI handler - Chip automatically disables interrupt
4303  * after sending the MSI, so the driver doesn't have to do it.
4304  */
4305 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4306 {
4307         struct net_device *dev = dev_id;
4308         struct tg3 *tp = netdev_priv(dev);
4309
4310         prefetch(tp->hw_status);
4311         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4312
4313         if (likely(!tg3_irq_sync(tp)))
4314                 netif_rx_schedule(dev, &tp->napi);
4315
4316         return IRQ_HANDLED;
4317 }
4318
4319 /* MSI ISR - No need to check for interrupt sharing and no need to
4320  * flush status block and interrupt mailbox. PCI ordering rules
4321  * guarantee that MSI will arrive after the status block.
4322  */
4323 static irqreturn_t tg3_msi(int irq, void *dev_id)
4324 {
4325         struct net_device *dev = dev_id;
4326         struct tg3 *tp = netdev_priv(dev);
4327
4328         prefetch(tp->hw_status);
4329         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4330         /*
4331          * Writing any value to intr-mbox-0 clears PCI INTA# and
4332          * chip-internal interrupt pending events.
4333          * Writing non-zero to intr-mbox-0 additionally tells the
4334          * NIC to stop sending us irqs, engaging "in-intr-handler"
4335          * event coalescing.
4336          */
4337         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4338         if (likely(!tg3_irq_sync(tp)))
4339                 netif_rx_schedule(dev, &tp->napi);
4340
4341         return IRQ_RETVAL(1);
4342 }
4343
4344 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4345 {
4346         struct net_device *dev = dev_id;
4347         struct tg3 *tp = netdev_priv(dev);
4348         struct tg3_hw_status *sblk = tp->hw_status;
4349         unsigned int handled = 1;
4350
4351         /* In INTx mode, it is possible for the interrupt to arrive at
4352          * the CPU before the status block that was posted prior to the interrupt.
4353          * Reading the PCI State register will confirm whether the
4354          * interrupt is ours and will flush the status block.
4355          */
4356         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4357                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4358                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4359                         handled = 0;
4360                         goto out;
4361                 }
4362         }
4363
4364         /*
4365          * Writing any value to intr-mbox-0 clears PCI INTA# and
4366          * chip-internal interrupt pending events.
4367          * Writing non-zero to intr-mbox-0 additionally tells the
4368          * NIC to stop sending us irqs, engaging "in-intr-handler"
4369          * event coalescing.
4370          *
4371          * Flush the mailbox to de-assert the IRQ immediately to prevent
4372          * spurious interrupts.  The flush impacts performance but
4373          * excessive spurious interrupts can be worse in some cases.
4374          */
4375         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4376         if (tg3_irq_sync(tp))
4377                 goto out;
4378         sblk->status &= ~SD_STATUS_UPDATED;
4379         if (likely(tg3_has_work(tp))) {
4380                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4381                 netif_rx_schedule(dev, &tp->napi);
4382         } else {
4383                 /* No work, shared interrupt perhaps?  re-enable
4384                  * interrupts, and flush that PCI write
4385                  */
4386                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4387                                0x00000000);
4388         }
4389 out:
4390         return IRQ_RETVAL(handled);
4391 }
4392
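/* Editor's note: illustrative addition, not part of the driver.  The
 * handlers above use two writes to interrupt mailbox 0: any write acks
 * INTA# and pending events, a non-zero value additionally masks further
 * irqs ("in-intr-handler" coalescing), and a zero re-enables them.  The
 * wrappers below (hypothetical names) only restate those two operations
 * with the same tw32_mailbox_f() accessor seen above.
 */
static inline void example_irq_ack_and_mask(struct tg3 *tp)
{
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void example_irq_unmask(struct tg3 *tp)
{
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
}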
4393 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4394 {
4395         struct net_device *dev = dev_id;
4396         struct tg3 *tp = netdev_priv(dev);
4397         struct tg3_hw_status *sblk = tp->hw_status;
4398         unsigned int handled = 1;
4399
4400         /* In INTx mode, it is possible for the interrupt to arrive at
4401          * the CPU before the status block that was posted prior to the interrupt.
4402          * Reading the PCI State register will confirm whether the
4403          * interrupt is ours and will flush the status block.
4404          */
4405         if (unlikely(sblk->status_tag == tp->last_tag)) {
4406                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4407                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4408                         handled = 0;
4409                         goto out;
4410                 }
4411         }
4412
4413         /*
4414          * Writing any value to intr-mbox-0 clears PCI INTA# and
4415          * chip-internal interrupt pending events.
4416          * Writing non-zero to intr-mbox-0 additionally tells the
4417          * NIC to stop sending us irqs, engaging "in-intr-handler"
4418          * event coalescing.
4419          *
4420          * Flush the mailbox to de-assert the IRQ immediately to prevent
4421          * spurious interrupts.  The flush impacts performance but
4422          * excessive spurious interrupts can be worse in some cases.
4423          */
4424         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4425         if (tg3_irq_sync(tp))
4426                 goto out;
4427         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4428                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4429                 /* Update last_tag to mark that this status has been
4430                  * seen. Because interrupt may be shared, we may be
4431                  * racing with tg3_poll(), so only update last_tag
4432                  * if tg3_poll() is not scheduled.
4433                  */
4434                 tp->last_tag = sblk->status_tag;
4435                 __netif_rx_schedule(dev, &tp->napi);
4436         }
4437 out:
4438         return IRQ_RETVAL(handled);
4439 }
4440
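/* Editor's note: illustrative addition, not part of the driver.
 * tg3_interrupt_tagged() above relies on the tagged-status scheme: the
 * chip writes a new status_tag with each status block update, so a tag
 * equal to the last one the driver acknowledged means there is nothing
 * new to look at.  A minimal restatement (hypothetical helper):
 */
static inline int example_tagged_work_pending(u32 hw_status_tag, u32 last_acked_tag)
{
        return hw_status_tag != last_acked_tag;
}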
4441 /* ISR for interrupt test */
4442 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4443 {
4444         struct net_device *dev = dev_id;
4445         struct tg3 *tp = netdev_priv(dev);
4446         struct tg3_hw_status *sblk = tp->hw_status;
4447
4448         if ((sblk->status & SD_STATUS_UPDATED) ||
4449             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4450                 tg3_disable_ints(tp);
4451                 return IRQ_RETVAL(1);
4452         }
4453         return IRQ_RETVAL(0);
4454 }
4455
4456 static int tg3_init_hw(struct tg3 *, int);
4457 static int tg3_halt(struct tg3 *, int, int);
4458
4459 /* Restart hardware after configuration changes, self-test, etc.
4460  * Invoked with tp->lock held.
4461  */
4462 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4463         __releases(tp->lock)
4464         __acquires(tp->lock)
4465 {
4466         int err;
4467
4468         err = tg3_init_hw(tp, reset_phy);
4469         if (err) {
4470                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4471                        "aborting.\n", tp->dev->name);
4472                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4473                 tg3_full_unlock(tp);
4474                 del_timer_sync(&tp->timer);
4475                 tp->irq_sync = 0;
4476                 napi_enable(&tp->napi);
4477                 dev_close(tp->dev);
4478                 tg3_full_lock(tp, 0);
4479         }
4480         return err;
4481 }
4482
4483 #ifdef CONFIG_NET_POLL_CONTROLLER
4484 static void tg3_poll_controller(struct net_device *dev)
4485 {
4486         struct tg3 *tp = netdev_priv(dev);
4487
4488         tg3_interrupt(tp->pdev->irq, dev);
4489 }
4490 #endif
4491
4492 static void tg3_reset_task(struct work_struct *work)
4493 {
4494         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4495         int err;
4496         unsigned int restart_timer;
4497
4498         tg3_full_lock(tp, 0);
4499
4500         if (!netif_running(tp->dev)) {
4501                 tg3_full_unlock(tp);
4502                 return;
4503         }
4504
4505         tg3_full_unlock(tp);
4506
4507         tg3_phy_stop(tp);
4508
4509         tg3_netif_stop(tp);
4510
4511         tg3_full_lock(tp, 1);
4512
4513         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4514         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4515
4516         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4517                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4518                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4519                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4520                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4521         }
4522
4523         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4524         err = tg3_init_hw(tp, 1);
4525         if (err)
4526                 goto out;
4527
4528         tg3_netif_start(tp);
4529
4530         if (restart_timer)
4531                 mod_timer(&tp->timer, jiffies + 1);
4532
4533 out:
4534         tg3_full_unlock(tp);
4535
4536         if (!err)
4537                 tg3_phy_start(tp);
4538 }
4539
4540 static void tg3_dump_short_state(struct tg3 *tp)
4541 {
4542         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4543                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4544         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4545                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4546 }
4547
4548 static void tg3_tx_timeout(struct net_device *dev)
4549 {
4550         struct tg3 *tp = netdev_priv(dev);
4551
4552         if (netif_msg_tx_err(tp)) {
4553                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4554                        dev->name);
4555                 tg3_dump_short_state(tp);
4556         }
4557
4558         schedule_work(&tp->reset_task);
4559 }
4560
4561 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4562 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4563 {
4564         u32 base = (u32) mapping & 0xffffffff;
4565
4566         return ((base > 0xffffdcc0) &&
4567                 (base + len + 8 < base));
4568 }
4569
4570 /* Test for DMA addresses > 40-bit */
4571 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4572                                           int len)
4573 {
4574 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4575         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4576                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4577         return 0;
4578 #else
4579         return 0;
4580 #endif
4581 }
4582
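/* Editor's note: illustrative worked example, not part of the driver.
 * tg3_4g_overflow_test() above detects a buffer straddling a 4GB
 * boundary via unsigned wrap of the low 32 address bits: if
 * base + len + 8 wraps past 0xffffffff, the sum becomes smaller than
 * base.  For example, base = 0xfffffc00 (1 KB below a boundary) with
 * len = 1500 gives 0xfffffc00 + 1500 + 8, which truncates to 0x1e4 and
 * is therefore smaller than base, so the test fires; the
 * "base > 0xffffdcc0" pre-check merely limits the test to mappings that
 * start within a few KB of a boundary.  tg3_40bit_overflow_test() is
 * the same idea for the 40-bit DMA limit: it flags mappings whose end
 * exceeds DMA_40BIT_MASK.  A compact restatement (hypothetical name):
 */
static inline int example_crosses_4g(u64 mapping, int len)
{
        u32 base = (u32) mapping;

        return (base + len + 8) < base; /* unsigned wrap => crossed 4GB */
}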
4583 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4584
4585 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4586 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4587                                        u32 last_plus_one, u32 *start,
4588                                        u32 base_flags, u32 mss)
4589 {
4590         struct sk_buff *new_skb;
4591         dma_addr_t new_addr = 0;
4592         u32 entry = *start;
4593         int i, ret = 0;
4594
4595         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4596                 new_skb = skb_copy(skb, GFP_ATOMIC);
4597         else {
4598                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4599
4600                 new_skb = skb_copy_expand(skb,
4601                                           skb_headroom(skb) + more_headroom,
4602                                           skb_tailroom(skb), GFP_ATOMIC);
4603         }
4604
4605         if (!new_skb) {
4606                 ret = -1;
4607         } else {
4608                 /* New SKB is guaranteed to be linear. */
4609                 entry = *start;
4610                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4611                                           PCI_DMA_TODEVICE);
4612                 /* Make sure new skb does not cross any 4G boundaries.
4613                  * Drop the packet if it does.
4614                  */
4615                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4616                         ret = -1;
4617                         dev_kfree_skb(new_skb);
4618                         new_skb = NULL;
4619                 } else {
4620                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4621                                     base_flags, 1 | (mss << 1));
4622                         *start = NEXT_TX(entry);
4623                 }
4624         }
4625
4626         /* Now clean up the sw ring entries. */
4627         i = 0;
4628         while (entry != last_plus_one) {
4629                 int len;
4630
4631                 if (i == 0)
4632                         len = skb_headlen(skb);
4633                 else
4634                         len = skb_shinfo(skb)->frags[i-1].size;
4635                 pci_unmap_single(tp->pdev,
4636                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4637                                  len, PCI_DMA_TODEVICE);
4638                 if (i == 0) {
4639                         tp->tx_buffers[entry].skb = new_skb;
4640                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4641                 } else {
4642                         tp->tx_buffers[entry].skb = NULL;
4643                 }
4644                 entry = NEXT_TX(entry);
4645                 i++;
4646         }
4647
4648         dev_kfree_skb(skb);
4649
4650         return ret;
4651 }
4652
4653 static void tg3_set_txd(struct tg3 *tp, int entry,
4654                         dma_addr_t mapping, int len, u32 flags,
4655                         u32 mss_and_is_end)
4656 {
4657         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4658         int is_end = (mss_and_is_end & 0x1);
4659         u32 mss = (mss_and_is_end >> 1);
4660         u32 vlan_tag = 0;
4661
4662         if (is_end)
4663                 flags |= TXD_FLAG_END;
4664         if (flags & TXD_FLAG_VLAN) {
4665                 vlan_tag = flags >> 16;
4666                 flags &= 0xffff;
4667         }
4668         vlan_tag |= (mss << TXD_MSS_SHIFT);
4669
4670         txd->addr_hi = ((u64) mapping >> 32);
4671         txd->addr_lo = ((u64) mapping & 0xffffffff);
4672         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4673         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4674 }
4675
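/* Editor's note: illustrative addition, not part of the driver.
 * Callers of tg3_set_txd() pack two values into the mss_and_is_end
 * argument: bit 0 carries "this is the last fragment" and the upper
 * bits carry the MSS, i.e. (is_end) | (mss << 1), which the function
 * above unpacks again.  Hypothetical helpers restating that encoding:
 */
static inline u32 example_pack_mss_is_end(u32 mss, int is_end)
{
        return (is_end ? 1 : 0) | (mss << 1);
}

static inline void example_unpack_mss_is_end(u32 packed, u32 *mss, int *is_end)
{
        *is_end = packed & 0x1;
        *mss = packed >> 1;
}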
4676 /* hard_start_xmit for devices that don't have any bugs and
4677  * support TG3_FLG2_HW_TSO_2 only.
4678  */
4679 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4680 {
4681         struct tg3 *tp = netdev_priv(dev);
4682         dma_addr_t mapping;
4683         u32 len, entry, base_flags, mss;
4684
4685         len = skb_headlen(skb);
4686
4687         /* We are running in BH disabled context with netif_tx_lock
4688          * and TX reclaim runs via tp->napi.poll inside of a software
4689          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4690          * no IRQ context deadlocks to worry about either.  Rejoice!
4691          */
4692         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4693                 if (!netif_queue_stopped(dev)) {
4694                         netif_stop_queue(dev);
4695
4696                         /* This is a hard error, log it. */
4697                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4698                                "queue awake!\n", dev->name);
4699                 }
4700                 return NETDEV_TX_BUSY;
4701         }
4702
4703         entry = tp->tx_prod;
4704         base_flags = 0;
4705         mss = 0;
4706         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4707                 int tcp_opt_len, ip_tcp_len;
4708
4709                 if (skb_header_cloned(skb) &&
4710                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4711                         dev_kfree_skb(skb);
4712                         goto out_unlock;
4713                 }
4714
4715                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4716                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4717                 else {
4718                         struct iphdr *iph = ip_hdr(skb);
4719
4720                         tcp_opt_len = tcp_optlen(skb);
4721                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4722
4723                         iph->check = 0;
4724                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4725                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4726                 }
4727
4728                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4729                                TXD_FLAG_CPU_POST_DMA);
4730
4731                 tcp_hdr(skb)->check = 0;
4732
4733         }
4734         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4735                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4736 #if TG3_VLAN_TAG_USED
4737         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4738                 base_flags |= (TXD_FLAG_VLAN |
4739                                (vlan_tx_tag_get(skb) << 16));
4740 #endif
4741
4742         /* Queue skb data, a.k.a. the main skb fragment. */
4743         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4744
4745         tp->tx_buffers[entry].skb = skb;
4746         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4747
4748         tg3_set_txd(tp, entry, mapping, len, base_flags,
4749                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4750
4751         entry = NEXT_TX(entry);
4752
4753         /* Now loop through additional data fragments, and queue them. */
4754         if (skb_shinfo(skb)->nr_frags > 0) {
4755                 unsigned int i, last;
4756
4757                 last = skb_shinfo(skb)->nr_frags - 1;
4758                 for (i = 0; i <= last; i++) {
4759                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4760
4761                         len = frag->size;
4762                         mapping = pci_map_page(tp->pdev,
4763                                                frag->page,
4764                                                frag->page_offset,
4765                                                len, PCI_DMA_TODEVICE);
4766
4767                         tp->tx_buffers[entry].skb = NULL;
4768                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4769
4770                         tg3_set_txd(tp, entry, mapping, len,
4771                                     base_flags, (i == last) | (mss << 1));
4772
4773                         entry = NEXT_TX(entry);
4774                 }
4775         }
4776
4777         /* Packets are ready, update Tx producer idx local and on card. */
4778         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4779
4780         tp->tx_prod = entry;
4781         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4782                 netif_stop_queue(dev);
4783                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4784                         netif_wake_queue(tp->dev);
4785         }
4786
4787 out_unlock:
4788         mmiowb();
4789
4790         dev->trans_start = jiffies;
4791
4792         return NETDEV_TX_OK;
4793 }
4794
4795 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4796
4797 /* Use GSO to work around a rare TSO bug that may be triggered when the
4798  * TSO header is greater than 80 bytes.
4799  */
4800 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4801 {
4802         struct sk_buff *segs, *nskb;
4803
4804         /* Estimate the number of fragments in the worst case */
4805         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4806                 netif_stop_queue(tp->dev);
4807                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4808                         return NETDEV_TX_BUSY;
4809
4810                 netif_wake_queue(tp->dev);
4811         }
4812
4813         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4814         if (IS_ERR(segs))
4815                 goto tg3_tso_bug_end;
4816
4817         do {
4818                 nskb = segs;
4819                 segs = segs->next;
4820                 nskb->next = NULL;
4821                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4822         } while (segs);
4823
4824 tg3_tso_bug_end:
4825         dev_kfree_skb(skb);
4826
4827         return NETDEV_TX_OK;
4828 }
4829
4830 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4831  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4832  */
4833 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4834 {
4835         struct tg3 *tp = netdev_priv(dev);
4836         dma_addr_t mapping;
4837         u32 len, entry, base_flags, mss;
4838         int would_hit_hwbug;
4839
4840         len = skb_headlen(skb);
4841
4842         /* We are running in BH disabled context with netif_tx_lock
4843          * and TX reclaim runs via tp->napi.poll inside of a software
4844          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4845          * no IRQ context deadlocks to worry about either.  Rejoice!
4846          */
4847         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4848                 if (!netif_queue_stopped(dev)) {
4849                         netif_stop_queue(dev);
4850
4851                         /* This is a hard error, log it. */
4852                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4853                                "queue awake!\n", dev->name);
4854                 }
4855                 return NETDEV_TX_BUSY;
4856         }
4857
4858         entry = tp->tx_prod;
4859         base_flags = 0;
4860         if (skb->ip_summed == CHECKSUM_PARTIAL)
4861                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4862         mss = 0;
4863         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4864                 struct iphdr *iph;
4865                 int tcp_opt_len, ip_tcp_len, hdr_len;
4866
4867                 if (skb_header_cloned(skb) &&
4868                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4869                         dev_kfree_skb(skb);
4870                         goto out_unlock;
4871                 }
4872
4873                 tcp_opt_len = tcp_optlen(skb);
4874                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4875
4876                 hdr_len = ip_tcp_len + tcp_opt_len;
4877                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4878                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4879                         return (tg3_tso_bug(tp, skb));
4880
4881                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4882                                TXD_FLAG_CPU_POST_DMA);
4883
4884                 iph = ip_hdr(skb);
4885                 iph->check = 0;
4886                 iph->tot_len = htons(mss + hdr_len);
4887                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4888                         tcp_hdr(skb)->check = 0;
4889                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4890                 } else
4891                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4892                                                                  iph->daddr, 0,
4893                                                                  IPPROTO_TCP,
4894                                                                  0);
4895
4896                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4897                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4898                         if (tcp_opt_len || iph->ihl > 5) {
4899                                 int tsflags;
4900
4901                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4902                                 mss |= (tsflags << 11);
4903                         }
4904                 } else {
4905                         if (tcp_opt_len || iph->ihl > 5) {
4906                                 int tsflags;
4907
4908                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4909                                 base_flags |= tsflags << 12;
4910                         }
4911                 }
4912         }
4913 #if TG3_VLAN_TAG_USED
4914         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4915                 base_flags |= (TXD_FLAG_VLAN |
4916                                (vlan_tx_tag_get(skb) << 16));
4917 #endif
4918
4919         /* Queue skb data, a.k.a. the main skb fragment. */
4920         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4921
4922         tp->tx_buffers[entry].skb = skb;
4923         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4924
4925         would_hit_hwbug = 0;
4926
4927         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4928                 would_hit_hwbug = 1;
4929         else if (tg3_4g_overflow_test(mapping, len))
4930                 would_hit_hwbug = 1;
4931
4932         tg3_set_txd(tp, entry, mapping, len, base_flags,
4933                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4934
4935         entry = NEXT_TX(entry);
4936
4937         /* Now loop through additional data fragments, and queue them. */
4938         if (skb_shinfo(skb)->nr_frags > 0) {
4939                 unsigned int i, last;
4940
4941                 last = skb_shinfo(skb)->nr_frags - 1;
4942                 for (i = 0; i <= last; i++) {
4943                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4944
4945                         len = frag->size;
4946                         mapping = pci_map_page(tp->pdev,
4947                                                frag->page,
4948                                                frag->page_offset,
4949                                                len, PCI_DMA_TODEVICE);
4950
4951                         tp->tx_buffers[entry].skb = NULL;
4952                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4953
4954                         if (tg3_4g_overflow_test(mapping, len))
4955                                 would_hit_hwbug = 1;
4956
4957                         if (tg3_40bit_overflow_test(tp, mapping, len))
4958                                 would_hit_hwbug = 1;
4959
4960                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4961                                 tg3_set_txd(tp, entry, mapping, len,
4962                                             base_flags, (i == last)|(mss << 1));
4963                         else
4964                                 tg3_set_txd(tp, entry, mapping, len,
4965                                             base_flags, (i == last));
4966
4967                         entry = NEXT_TX(entry);
4968                 }
4969         }
4970
4971         if (would_hit_hwbug) {
4972                 u32 last_plus_one = entry;
4973                 u32 start;
4974
4975                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4976                 start &= (TG3_TX_RING_SIZE - 1);
4977
4978                 /* If the workaround fails due to memory/mapping
4979                  * failure, silently drop this packet.
4980                  */
4981                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4982                                                 &start, base_flags, mss))
4983                         goto out_unlock;
4984
4985                 entry = start;
4986         }
4987
4988         /* Packets are ready, update Tx producer idx local and on card. */
4989         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4990
4991         tp->tx_prod = entry;
4992         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4993                 netif_stop_queue(dev);
4994                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4995                         netif_wake_queue(tp->dev);
4996         }
4997
4998 out_unlock:
4999         mmiowb();
5000
5001         dev->trans_start = jiffies;
5002
5003         return NETDEV_TX_OK;
5004 }
5005
5006 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5007                                int new_mtu)
5008 {
5009         dev->mtu = new_mtu;
5010
5011         if (new_mtu > ETH_DATA_LEN) {
5012                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5013                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5014                         ethtool_op_set_tso(dev, 0);
5015                 }
5016                 else
5017                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5018         } else {
5019                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5020                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5021                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5022         }
5023 }
5024
5025 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5026 {
5027         struct tg3 *tp = netdev_priv(dev);
5028         int err;
5029
5030         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5031                 return -EINVAL;
5032
5033         if (!netif_running(dev)) {
5034                 /* We'll just catch it later when the
5035                  * device is brought up.
5036                  */
5037                 tg3_set_mtu(dev, tp, new_mtu);
5038                 return 0;
5039         }
5040
5041         tg3_phy_stop(tp);
5042
5043         tg3_netif_stop(tp);
5044
5045         tg3_full_lock(tp, 1);
5046
5047         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5048
5049         tg3_set_mtu(dev, tp, new_mtu);
5050
5051         err = tg3_restart_hw(tp, 0);
5052
5053         if (!err)
5054                 tg3_netif_start(tp);
5055
5056         tg3_full_unlock(tp);
5057
5058         if (!err)
5059                 tg3_phy_start(tp);
5060
5061         return err;
5062 }
5063
5064 /* Free up pending packets in all rx/tx rings.
5065  *
5066  * The chip has been shut down and the driver detached from
5067  * the networking stack, so no interrupts or new tx packets will
5068  * end up in the driver.  tp->{tx,}lock is not held and we are not
5069  * in an interrupt context and thus may sleep.
5070  */
5071 static void tg3_free_rings(struct tg3 *tp)
5072 {
5073         struct ring_info *rxp;
5074         int i;
5075
5076         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5077                 rxp = &tp->rx_std_buffers[i];
5078
5079                 if (rxp->skb == NULL)
5080                         continue;
5081                 pci_unmap_single(tp->pdev,
5082                                  pci_unmap_addr(rxp, mapping),
5083                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5084                                  PCI_DMA_FROMDEVICE);
5085                 dev_kfree_skb_any(rxp->skb);
5086                 rxp->skb = NULL;
5087         }
5088
5089         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5090                 rxp = &tp->rx_jumbo_buffers[i];
5091
5092                 if (rxp->skb == NULL)
5093                         continue;
5094                 pci_unmap_single(tp->pdev,
5095                                  pci_unmap_addr(rxp, mapping),
5096                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5097                                  PCI_DMA_FROMDEVICE);
5098                 dev_kfree_skb_any(rxp->skb);
5099                 rxp->skb = NULL;
5100         }
5101
5102         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5103                 struct tx_ring_info *txp;
5104                 struct sk_buff *skb;
5105                 int j;
5106
5107                 txp = &tp->tx_buffers[i];
5108                 skb = txp->skb;
5109
5110                 if (skb == NULL) {
5111                         i++;
5112                         continue;
5113                 }
5114
5115                 pci_unmap_single(tp->pdev,
5116                                  pci_unmap_addr(txp, mapping),
5117                                  skb_headlen(skb),
5118                                  PCI_DMA_TODEVICE);
5119                 txp->skb = NULL;
5120
5121                 i++;
5122
5123                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
5124                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
5125                         pci_unmap_page(tp->pdev,
5126                                        pci_unmap_addr(txp, mapping),
5127                                        skb_shinfo(skb)->frags[j].size,
5128                                        PCI_DMA_TODEVICE);
5129                         i++;
5130                 }
5131
5132                 dev_kfree_skb_any(skb);
5133         }
5134 }
5135
5136 /* Initialize tx/rx rings for packet processing.
5137  *
5138  * The chip has been shut down and the driver detached from
5139  * the networking stack, so no interrupts or new tx packets will
5140  * end up in the driver.  tp->{tx,}lock are held and thus
5141  * we may not sleep.
5142  */
5143 static int tg3_init_rings(struct tg3 *tp)
5144 {
5145         u32 i;
5146
5147         /* Free up all the SKBs. */
5148         tg3_free_rings(tp);
5149
5150         /* Zero out all descriptors. */
5151         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5152         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5153         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5154         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5155
5156         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5157         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5158             (tp->dev->mtu > ETH_DATA_LEN))
5159                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5160
5161         /* Initialize invariants of the rings; we only set this
5162          * stuff once.  This works because the card does not
5163          * write into the rx buffer posting rings.
5164          */
5165         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5166                 struct tg3_rx_buffer_desc *rxd;
5167
5168                 rxd = &tp->rx_std[i];
5169                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5170                         << RXD_LEN_SHIFT;
5171                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5172                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5173                                (i << RXD_OPAQUE_INDEX_SHIFT));
5174         }
5175
5176         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5177                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5178                         struct tg3_rx_buffer_desc *rxd;
5179
5180                         rxd = &tp->rx_jumbo[i];
5181                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5182                                 << RXD_LEN_SHIFT;
5183                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5184                                 RXD_FLAG_JUMBO;
5185                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5186                                (i << RXD_OPAQUE_INDEX_SHIFT));
5187                 }
5188         }
5189
5190         /* Now allocate fresh SKBs for each rx ring. */
5191         for (i = 0; i < tp->rx_pending; i++) {
5192                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5193                         printk(KERN_WARNING PFX
5194                                "%s: Using a smaller RX standard ring, "
5195                                "only %d out of %d buffers were allocated "
5196                                "successfully.\n",
5197                                tp->dev->name, i, tp->rx_pending);
5198                         if (i == 0)
5199                                 return -ENOMEM;
5200                         tp->rx_pending = i;
5201                         break;
5202                 }
5203         }
5204
5205         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5206                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5207                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5208                                              -1, i) < 0) {
5209                                 printk(KERN_WARNING PFX
5210                                        "%s: Using a smaller RX jumbo ring, "
5211                                        "only %d out of %d buffers were "
5212                                        "allocated successfully.\n",
5213                                        tp->dev->name, i, tp->rx_jumbo_pending);
5214                                 if (i == 0) {
5215                                         tg3_free_rings(tp);
5216                                         return -ENOMEM;
5217                                 }
5218                                 tp->rx_jumbo_pending = i;
5219                                 break;
5220                         }
5221                 }
5222         }
5223         return 0;
5224 }
5225
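/* Editor's note: illustrative addition, not part of the driver.
 * The "opaque" cookie set up in tg3_init_rings() above is a simple
 * encode/decode pair: the ring type and the buffer index are stored as
 * ring | (i << RXD_OPAQUE_INDEX_SHIFT), and tg3_rx() recovers them with
 * RXD_OPAQUE_RING_MASK and RXD_OPAQUE_INDEX_MASK to locate the
 * ring_info entry that owns a completed buffer.  Hypothetical helpers
 * restating that step with the driver's own macros:
 */
static inline u32 example_make_opaque(u32 ring, u32 index)
{
        return ring | (index << RXD_OPAQUE_INDEX_SHIFT);
}

static inline void example_parse_opaque(u32 opaque, u32 *ring, u32 *index)
{
        *ring = opaque & RXD_OPAQUE_RING_MASK;
        *index = opaque & RXD_OPAQUE_INDEX_MASK;
}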
5226 /*
5227  * Must not be invoked with interrupt sources disabled and
5228  * the hardware shut down.
5229  */
5230 static void tg3_free_consistent(struct tg3 *tp)
5231 {
5232         kfree(tp->rx_std_buffers);
5233         tp->rx_std_buffers = NULL;
5234         if (tp->rx_std) {
5235                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5236                                     tp->rx_std, tp->rx_std_mapping);
5237                 tp->rx_std = NULL;
5238         }
5239         if (tp->rx_jumbo) {
5240                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5241                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5242                 tp->rx_jumbo = NULL;
5243         }
5244         if (tp->rx_rcb) {
5245                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5246                                     tp->rx_rcb, tp->rx_rcb_mapping);
5247                 tp->rx_rcb = NULL;
5248         }
5249         if (tp->tx_ring) {
5250                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5251                         tp->tx_ring, tp->tx_desc_mapping);
5252                 tp->tx_ring = NULL;
5253         }
5254         if (tp->hw_status) {
5255                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5256                                     tp->hw_status, tp->status_mapping);
5257                 tp->hw_status = NULL;
5258         }
5259         if (tp->hw_stats) {
5260                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5261                                     tp->hw_stats, tp->stats_mapping);
5262                 tp->hw_stats = NULL;
5263         }
5264 }
5265
5266 /*
5267  * Must not be invoked with interrupt sources disabled and
5268  * the hardware shut down.  Can sleep.
5269  */
5270 static int tg3_alloc_consistent(struct tg3 *tp)
5271 {
5272         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5273                                       (TG3_RX_RING_SIZE +
5274                                        TG3_RX_JUMBO_RING_SIZE)) +
5275                                      (sizeof(struct tx_ring_info) *
5276                                       TG3_TX_RING_SIZE),
5277                                      GFP_KERNEL);
5278         if (!tp->rx_std_buffers)
5279                 return -ENOMEM;
5280
5281         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5282         tp->tx_buffers = (struct tx_ring_info *)
5283                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5284
5285         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5286                                           &tp->rx_std_mapping);
5287         if (!tp->rx_std)
5288                 goto err_out;
5289
5290         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5291                                             &tp->rx_jumbo_mapping);
5292
5293         if (!tp->rx_jumbo)
5294                 goto err_out;
5295
5296         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5297                                           &tp->rx_rcb_mapping);
5298         if (!tp->rx_rcb)
5299                 goto err_out;
5300
5301         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5302                                            &tp->tx_desc_mapping);
5303         if (!tp->tx_ring)
5304                 goto err_out;
5305
5306         tp->hw_status = pci_alloc_consistent(tp->pdev,
5307                                              TG3_HW_STATUS_SIZE,
5308                                              &tp->status_mapping);
5309         if (!tp->hw_status)
5310                 goto err_out;
5311
5312         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5313                                             sizeof(struct tg3_hw_stats),
5314                                             &tp->stats_mapping);
5315         if (!tp->hw_stats)
5316                 goto err_out;
5317
5318         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5319         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5320
5321         return 0;
5322
5323 err_out:
5324         tg3_free_consistent(tp);
5325         return -ENOMEM;
5326 }
5327
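/* Editor's note: layout sketch, an illustrative addition rather than
 * driver code.  tg3_alloc_consistent() above carves three bookkeeping
 * arrays out of one kzalloc():
 *
 *   offset 0:                 struct ring_info    rx_std_buffers[TG3_RX_RING_SIZE]
 *   after the std entries:    struct ring_info    rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]
 *   after the jumbo entries:  struct tx_ring_info tx_buffers[TG3_TX_RING_SIZE]
 *
 * (the last pointer is obtained with a cast because its element type
 * differs), so the single kfree(tp->rx_std_buffers) in
 * tg3_free_consistent() above releases all three arrays at once.
 */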
5328 #define MAX_WAIT_CNT 1000
5329
5330 /* To stop a block, clear the enable bit and poll until it
5331  * clears.  tp->lock is held.
5332  */
5333 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5334 {
5335         unsigned int i;
5336         u32 val;
5337
5338         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5339                 switch (ofs) {
5340                 case RCVLSC_MODE:
5341                 case DMAC_MODE:
5342                 case MBFREE_MODE:
5343                 case BUFMGR_MODE:
5344                 case MEMARB_MODE:
5345                         /* We can't enable/disable these bits of the
5346                          * 5705/5750; just say success.
5347                          */
5348                         return 0;
5349
5350                 default:
5351                         break;
5352                 }
5353         }
5354
5355         val = tr32(ofs);
5356         val &= ~enable_bit;
5357         tw32_f(ofs, val);
5358
5359         for (i = 0; i < MAX_WAIT_CNT; i++) {
5360                 udelay(100);
5361                 val = tr32(ofs);
5362                 if ((val & enable_bit) == 0)
5363                         break;
5364         }
5365
5366         if (i == MAX_WAIT_CNT && !silent) {
5367                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5368                        "ofs=%lx enable_bit=%x\n",
5369                        ofs, enable_bit);
5370                 return -ENODEV;
5371         }
5372
5373         return 0;
5374 }
5375
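/* Editor's note: worked timing example, an illustrative addition.
 * tg3_stop_block() above uses the classic clear-and-poll pattern:
 * clear the enable bit, then re-read up to MAX_WAIT_CNT (1000) times
 * with udelay(100) between reads, so a block that never stops costs at
 * most 1000 * 100 us = 100 ms of busy-waiting before -ENODEV is
 * returned (with 'silent' set, the timeout is neither logged nor
 * treated as an error).
 */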
5376 /* tp->lock is held. */
5377 static int tg3_abort_hw(struct tg3 *tp, int silent)
5378 {
5379         int i, err;
5380
5381         tg3_disable_ints(tp);
5382
5383         tp->rx_mode &= ~RX_MODE_ENABLE;
5384         tw32_f(MAC_RX_MODE, tp->rx_mode);
5385         udelay(10);
5386
5387         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5388         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5389         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5390         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5391         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5392         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5393
5394         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5395         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5396         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5397         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5398         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5399         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5400         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5401
5402         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5403         tw32_f(MAC_MODE, tp->mac_mode);
5404         udelay(40);
5405
5406         tp->tx_mode &= ~TX_MODE_ENABLE;
5407         tw32_f(MAC_TX_MODE, tp->tx_mode);
5408
5409         for (i = 0; i < MAX_WAIT_CNT; i++) {
5410                 udelay(100);
5411                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5412                         break;
5413         }
5414         if (i >= MAX_WAIT_CNT) {
5415                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5416                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5417                        tp->dev->name, tr32(MAC_TX_MODE));
5418                 err |= -ENODEV;
5419         }
5420
5421         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5422         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5423         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5424
5425         tw32(FTQ_RESET, 0xffffffff);
5426         tw32(FTQ_RESET, 0x00000000);
5427
5428         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5429         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5430
5431         if (tp->hw_status)
5432                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5433         if (tp->hw_stats)
5434                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5435
5436         return err;
5437 }
5438
5439 /* tp->lock is held. */
5440 static int tg3_nvram_lock(struct tg3 *tp)
5441 {
5442         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5443                 int i;
5444
5445                 if (tp->nvram_lock_cnt == 0) {
5446                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5447                         for (i = 0; i < 8000; i++) {
5448                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5449                                         break;
5450                                 udelay(20);
5451                         }
5452                         if (i == 8000) {
5453                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5454                                 return -ENODEV;
5455                         }
5456                 }
5457                 tp->nvram_lock_cnt++;
5458         }
5459         return 0;
5460 }
5461
5462 /* tp->lock is held. */
5463 static void tg3_nvram_unlock(struct tg3 *tp)
5464 {
5465         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5466                 if (tp->nvram_lock_cnt > 0)
5467                         tp->nvram_lock_cnt--;
5468                 if (tp->nvram_lock_cnt == 0)
5469                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5470         }
5471 }
5472
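/* Editor's note: illustrative addition, not part of the driver.
 * tg3_nvram_lock()/tg3_nvram_unlock() above form a refcounted hardware
 * semaphore: only the outermost lock (nvram_lock_cnt == 0) requests the
 * arbiter with SWARB_REQ_SET1 and polls for the grant bit for at most
 * 8000 * 20 us = 160 ms; nested callers just bump the count, and the
 * grant is dropped with SWARB_REQ_CLR1 only when the count returns to
 * zero.
 */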
5473 /* tp->lock is held. */
5474 static void tg3_enable_nvram_access(struct tg3 *tp)
5475 {
5476         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5477             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5478                 u32 nvaccess = tr32(NVRAM_ACCESS);
5479
5480                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5481         }
5482 }
5483
5484 /* tp->lock is held. */
5485 static void tg3_disable_nvram_access(struct tg3 *tp)
5486 {
5487         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5488             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5489                 u32 nvaccess = tr32(NVRAM_ACCESS);
5490
5491                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5492         }
5493 }
5494
5495 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5496 {
5497         int i;
5498         u32 apedata;
5499
5500         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5501         if (apedata != APE_SEG_SIG_MAGIC)
5502                 return;
5503
5504         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5505         if (!(apedata & APE_FW_STATUS_READY))
5506                 return;
5507
5508         /* Wait for up to 1 millisecond for APE to service previous event. */
5509         for (i = 0; i < 10; i++) {
5510                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5511                         return;
5512
5513                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5514
5515                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5516                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5517                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5518
5519                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5520
5521                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5522                         break;
5523
5524                 udelay(100);
5525         }
5526
5527         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5528                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5529 }
5530
5531 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5532 {
5533         u32 event;
5534         u32 apedata;
5535
5536         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5537                 return;
5538
5539         switch (kind) {
5540         case RESET_KIND_INIT:
5541                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5542                                 APE_HOST_SEG_SIG_MAGIC);
5543                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5544                                 APE_HOST_SEG_LEN_MAGIC);
5545                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5546                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5547                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5548                                 APE_HOST_DRIVER_ID_MAGIC);
5549                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5550                                 APE_HOST_BEHAV_NO_PHYLOCK);
5551
5552                 event = APE_EVENT_STATUS_STATE_START;
5553                 break;
5554         case RESET_KIND_SHUTDOWN:
5555                 event = APE_EVENT_STATUS_STATE_UNLOAD;
5556                 break;
5557         case RESET_KIND_SUSPEND:
5558                 event = APE_EVENT_STATUS_STATE_SUSPEND;
5559                 break;
5560         default:
5561                 return;
5562         }
5563
5564         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5565
5566         tg3_ape_send_event(tp, event);
5567 }
5568
5569 /* tp->lock is held. */
5570 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5571 {
5572         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5573                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5574
5575         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5576                 switch (kind) {
5577                 case RESET_KIND_INIT:
5578                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5579                                       DRV_STATE_START);
5580                         break;
5581
5582                 case RESET_KIND_SHUTDOWN:
5583                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5584                                       DRV_STATE_UNLOAD);
5585                         break;
5586
5587                 case RESET_KIND_SUSPEND:
5588                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5589                                       DRV_STATE_SUSPEND);
5590                         break;
5591
5592                 default:
5593                         break;
5594                 }
5595         }
5596
5597         if (kind == RESET_KIND_INIT ||
5598             kind == RESET_KIND_SUSPEND)
5599                 tg3_ape_driver_state_change(tp, kind);
5600 }
5601
5602 /* tp->lock is held. */
5603 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5604 {
5605         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5606                 switch (kind) {
5607                 case RESET_KIND_INIT:
5608                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5609                                       DRV_STATE_START_DONE);
5610                         break;
5611
5612                 case RESET_KIND_SHUTDOWN:
5613                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5614                                       DRV_STATE_UNLOAD_DONE);
5615                         break;
5616
5617                 default:
5618                         break;
5619                 }
5620         }
5621
5622         if (kind == RESET_KIND_SHUTDOWN)
5623                 tg3_ape_driver_state_change(tp, kind);
5624 }
5625
5626 /* tp->lock is held. */
5627 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5628 {
5629         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5630                 switch (kind) {
5631                 case RESET_KIND_INIT:
5632                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5633                                       DRV_STATE_START);
5634                         break;
5635
5636                 case RESET_KIND_SHUTDOWN:
5637                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5638                                       DRV_STATE_UNLOAD);
5639                         break;
5640
5641                 case RESET_KIND_SUSPEND:
5642                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5643                                       DRV_STATE_SUSPEND);
5644                         break;
5645
5646                 default:
5647                         break;
5648                 }
5649         }
5650 }
5651
5652 static int tg3_poll_fw(struct tg3 *tp)
5653 {
5654         int i;
5655         u32 val;
5656
5657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5658                 /* Wait up to 20ms for init done. */
5659                 for (i = 0; i < 200; i++) {
5660                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5661                                 return 0;
5662                         udelay(100);
5663                 }
5664                 return -ENODEV;
5665         }
5666
5667         /* Wait for firmware initialization to complete. */
5668         for (i = 0; i < 100000; i++) {
5669                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5670                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5671                         break;
5672                 udelay(10);
5673         }
5674
5675         /* Chip might not be fitted with firmware.  Some Sun onboard
5676          * parts are configured like that.  So don't signal the timeout
5677          * of the above loop as an error, but do report the lack of
5678          * running firmware once.
5679          */
5680         if (i >= 100000 &&
5681             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5682                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5683
5684                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5685                        tp->dev->name);
5686         }
5687
5688         return 0;
5689 }
5690
5691 /* Save PCI command register before chip reset */
5692 static void tg3_save_pci_state(struct tg3 *tp)
5693 {
5694         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5695 }
5696
5697 /* Restore PCI state after chip reset */
5698 static void tg3_restore_pci_state(struct tg3 *tp)
5699 {
5700         u32 val;
5701
5702         /* Re-enable indirect register accesses. */
5703         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5704                                tp->misc_host_ctrl);
5705
5706         /* Set MAX PCI retry to zero. */
5707         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5708         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5709             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5710                 val |= PCISTATE_RETRY_SAME_DMA;
5711         /* Allow reads and writes to the APE register and memory space. */
5712         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5713                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5714                        PCISTATE_ALLOW_APE_SHMEM_WR;
5715         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5716
5717         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5718
5719         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5720                 pcie_set_readrq(tp->pdev, 4096);
5721         else {
5722                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5723                                       tp->pci_cacheline_sz);
5724                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5725                                       tp->pci_lat_timer);
5726         }
5727
5728         /* Make sure PCI-X relaxed ordering bit is clear. */
5729         if (tp->pcix_cap) {
5730                 u16 pcix_cmd;
5731
5732                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5733                                      &pcix_cmd);
5734                 pcix_cmd &= ~PCI_X_CMD_ERO;
5735                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5736                                       pcix_cmd);
5737         }
5738
5739         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5740
5741                 /* Chip reset on 5780 will reset MSI enable bit,
5742                  * so we need to restore it.
5743                  */
5744                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5745                         u16 ctrl;
5746
5747                         pci_read_config_word(tp->pdev,
5748                                              tp->msi_cap + PCI_MSI_FLAGS,
5749                                              &ctrl);
5750                         pci_write_config_word(tp->pdev,
5751                                               tp->msi_cap + PCI_MSI_FLAGS,
5752                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5753                         val = tr32(MSGINT_MODE);
5754                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5755                 }
5756         }
5757 }
5758
5759 static void tg3_stop_fw(struct tg3 *);
5760
5761 /* tp->lock is held. */
5762 static int tg3_chip_reset(struct tg3 *tp)
5763 {
5764         u32 val;
5765         void (*write_op)(struct tg3 *, u32, u32);
5766         int err;
5767
5768         tg3_nvram_lock(tp);
5769
5770         tg3_mdio_stop(tp);
5771
5772         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5773
5774         /* No matching tg3_nvram_unlock() after this because
5775          * chip reset below will undo the nvram lock.
5776          */
5777         tp->nvram_lock_cnt = 0;
5778
5779         /* GRC_MISC_CFG core clock reset will clear the memory
5780          * enable bit in PCI register 4 and the MSI enable bit
5781          * on some chips, so we save relevant registers here.
5782          */
5783         tg3_save_pci_state(tp);
5784
5785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5786             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5787             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5788             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5791                 tw32(GRC_FASTBOOT_PC, 0);
5792
5793         /*
5794          * We must avoid the readl() that normally takes place.
5795          * It locks machines, causes machine checks, and other
5796          * fun things.  So temporarily disable the 5701
5797          * hardware workaround while we do the reset.
5798          */
5799         write_op = tp->write32;
5800         if (write_op == tg3_write_flush_reg32)
5801                 tp->write32 = tg3_write32;
5802
5803         /* Prevent the irq handler from reading or writing PCI registers
5804          * during chip reset when the memory enable bit in the PCI command
5805          * register may be cleared.  The chip does not generate interrupts
5806          * at this time, but the irq handler may still be called due to irq
5807          * sharing or irqpoll.
5808          */
5809         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5810         if (tp->hw_status) {
5811                 tp->hw_status->status = 0;
5812                 tp->hw_status->status_tag = 0;
5813         }
5814         tp->last_tag = 0;
5815         smp_mb();
5816         synchronize_irq(tp->pdev->irq);
5817
5818         /* do the reset */
5819         val = GRC_MISC_CFG_CORECLK_RESET;
5820
5821         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5822                 if (tr32(0x7e2c) == 0x60) {
5823                         tw32(0x7e2c, 0x20);
5824                 }
5825                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5826                         tw32(GRC_MISC_CFG, (1 << 29));
5827                         val |= (1 << 29);
5828                 }
5829         }
5830
5831         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5832                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5833                 tw32(GRC_VCPU_EXT_CTRL,
5834                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5835         }
5836
5837         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5838                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5839         tw32(GRC_MISC_CFG, val);
5840
5841         /* restore 5701 hardware bug workaround write method */
5842         tp->write32 = write_op;
5843
5844         /* Unfortunately, we have to delay before the PCI read back.
5845          * Some 575X chips will not even respond to a PCI cfg access
5846          * when the reset command is given to the chip.
5847          *
5848          * How do these hardware designers expect things to work
5849          * properly if the PCI write is posted for a long period
5850          * of time?  It is always necessary to have some method by
5851          * which a register read back can occur to push out the write
5852          * that does the reset.
5853          *
5854          * For most tg3 variants the trick below was working.
5855          * Ho hum...
5856          */
5857         udelay(120);
5858
5859         /* Flush PCI posted writes.  The normal MMIO registers
5860          * are inaccessible at this time so this is the only
5861          * way to do this reliably (actually, this is no longer
5862          * the case, see above).  I tried to use indirect
5863          * register read/write but this upset some 5701 variants.
5864          */
5865         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5866
5867         udelay(120);
5868
5869         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5870                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5871                         int i;
5872                         u32 cfg_val;
5873
5874                         /* Wait for link training to complete.  */
5875                         for (i = 0; i < 5000; i++)
5876                                 udelay(100);
5877
5878                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5879                         pci_write_config_dword(tp->pdev, 0xc4,
5880                                                cfg_val | (1 << 15));
5881                 }
5882                 /* Set PCIE max payload size and clear error status.  */
5883                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5884         }
5885
5886         tg3_restore_pci_state(tp);
5887
5888         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5889
5890         val = 0;
5891         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5892                 val = tr32(MEMARB_MODE);
5893         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5894
5895         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5896                 tg3_stop_fw(tp);
5897                 tw32(0x5000, 0x400);
5898         }
5899
5900         tw32(GRC_MODE, tp->grc_mode);
5901
5902         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5903                 val = tr32(0xc4);
5904
5905                 tw32(0xc4, val | (1 << 15));
5906         }
5907
5908         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5910                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5911                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5912                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5913                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5914         }
5915
5916         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5917                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5918                 tw32_f(MAC_MODE, tp->mac_mode);
5919         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5920                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5921                 tw32_f(MAC_MODE, tp->mac_mode);
5922         } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5923                 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5924                 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5925                         tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5926                 tw32_f(MAC_MODE, tp->mac_mode);
5927         } else
5928                 tw32_f(MAC_MODE, 0);
5929         udelay(40);
5930
5931         tg3_mdio_start(tp);
5932
5933         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5934
5935         err = tg3_poll_fw(tp);
5936         if (err)
5937                 return err;
5938
5939         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5940             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5941                 val = tr32(0x7c00);
5942
5943                 tw32(0x7c00, val | (1 << 25));
5944         }
5945
5946         /* Reprobe ASF enable state.  */
5947         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5948         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5949         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5950         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5951                 u32 nic_cfg;
5952
5953                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5954                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5955                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5956                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5957                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5958                 }
5959         }
5960
5961         return 0;
5962 }
5963
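/* Editor's sketch, not part of the driver: outside the reset window the
 * posted-write issue described above is handled by the flushing register
 * accessor (tg3_write_flush_reg32, temporarily swapped out at the top of
 * tg3_chip_reset()).  The idiom is simply to read the register back so the
 * posted MMIO write is pushed out to the chip before continuing; a minimal
 * open-coded equivalent is shown below.  Kept under #if 0 so it is never
 * built; the function name is made up for illustration.
 */
#if 0
static void tg3_write_then_flush_sketch(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);    /* MMIO write may be posted */
        readl(tp->regs + off);          /* read-back forces it to complete */
}
#endif
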
5964 /* tp->lock is held. */
5965 static void tg3_stop_fw(struct tg3 *tp)
5966 {
5967         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5968            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5969                 u32 val;
5970
5971                 /* Wait for RX cpu to ACK the previous event. */
5972                 tg3_wait_for_event_ack(tp);
5973
5974                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5975                 val = tr32(GRC_RX_CPU_EVENT);
5976                 val |= GRC_RX_CPU_DRIVER_EVENT;
5977                 tw32(GRC_RX_CPU_EVENT, val);
5978
5979                 /* Wait for RX cpu to ACK this event. */
5980                 tg3_wait_for_event_ack(tp);
5981         }
5982 }
5983
5984 /* tp->lock is held. */
5985 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5986 {
5987         int err;
5988
5989         tg3_stop_fw(tp);
5990
5991         tg3_write_sig_pre_reset(tp, kind);
5992
5993         tg3_abort_hw(tp, silent);
5994         err = tg3_chip_reset(tp);
5995
5996         tg3_write_sig_legacy(tp, kind);
5997         tg3_write_sig_post_reset(tp, kind);
5998
5999         if (err)
6000                 return err;
6001
6002         return 0;
6003 }
6004
6005 #define TG3_FW_RELEASE_MAJOR    0x0
6006 #define TG3_FW_RELEASE_MINOR    0x0
6007 #define TG3_FW_RELEASE_FIX      0x0
6008 #define TG3_FW_START_ADDR       0x08000000
6009 #define TG3_FW_TEXT_ADDR        0x08000000
6010 #define TG3_FW_TEXT_LEN         0x9c0
6011 #define TG3_FW_RODATA_ADDR      0x080009c0
6012 #define TG3_FW_RODATA_LEN       0x60
6013 #define TG3_FW_DATA_ADDR        0x08000a40
6014 #define TG3_FW_DATA_LEN         0x20
6015 #define TG3_FW_SBSS_ADDR        0x08000a60
6016 #define TG3_FW_SBSS_LEN         0xc
6017 #define TG3_FW_BSS_ADDR         0x08000a70
6018 #define TG3_FW_BSS_LEN          0x10
6019
6020 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6021         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6022         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6023         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6024         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6025         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6026         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6027         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6028         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6029         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6030         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6031         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6032         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6033         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6034         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6035         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6036         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6037         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6038         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6039         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6040         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6041         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6042         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6043         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6044         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6045         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6046         0, 0, 0, 0, 0, 0,
6047         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6048         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6049         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6050         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6051         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6052         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6053         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6054         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6055         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6056         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6057         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6058         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6059         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6060         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6061         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6062         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6063         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6064         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6065         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6066         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6067         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6068         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6069         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6070         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6071         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6072         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6073         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6074         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6075         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6076         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6077         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6078         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6079         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6080         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6081         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6082         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6083         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6084         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6085         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6086         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6087         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6088         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6089         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6090         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6091         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6092         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6093         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6094         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6095         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6096         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6097         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6098         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6099         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6100         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6101         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6102         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6103         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6104         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6105         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6106         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6107         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6108         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6109         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6110         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6111         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6112 };
6113
6114 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6115         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6116         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6117         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6118         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6119         0x00000000
6120 };
6121
6122 #if 0 /* All zeros, don't eat up space with it. */
6123 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6124         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6125         0x00000000, 0x00000000, 0x00000000, 0x00000000
6126 };
6127 #endif
6128
6129 #define RX_CPU_SCRATCH_BASE     0x30000
6130 #define RX_CPU_SCRATCH_SIZE     0x04000
6131 #define TX_CPU_SCRATCH_BASE     0x34000
6132 #define TX_CPU_SCRATCH_SIZE     0x04000
6133
6134 /* tp->lock is held. */
6135 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6136 {
6137         int i;
6138
6139         BUG_ON(offset == TX_CPU_BASE &&
6140             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6141
6142         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6143                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6144
6145                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6146                 return 0;
6147         }
6148         if (offset == RX_CPU_BASE) {
6149                 for (i = 0; i < 10000; i++) {
6150                         tw32(offset + CPU_STATE, 0xffffffff);
6151                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6152                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6153                                 break;
6154                 }
6155
6156                 tw32(offset + CPU_STATE, 0xffffffff);
6157                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6158                 udelay(10);
6159         } else {
6160                 for (i = 0; i < 10000; i++) {
6161                         tw32(offset + CPU_STATE, 0xffffffff);
6162                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6163                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6164                                 break;
6165                 }
6166         }
6167
6168         if (i >= 10000) {
6169                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
6170                        "%s CPU\n",
6171                        tp->dev->name,
6172                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6173                 return -ENODEV;
6174         }
6175
6176         /* Clear firmware's nvram arbitration. */
6177         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6178                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6179         return 0;
6180 }
6181
6182 struct fw_info {
6183         unsigned int text_base;
6184         unsigned int text_len;
6185         const u32 *text_data;
6186         unsigned int rodata_base;
6187         unsigned int rodata_len;
6188         const u32 *rodata_data;
6189         unsigned int data_base;
6190         unsigned int data_len;
6191         const u32 *data_data;
6192 };
6193
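/* Editor's sketch, not part of the driver: the same descriptor that
 * tg3_load_5701_a0_firmware_fix() below fills in field by field, written
 * here with C99 designated initializers purely to make the fw_info layout
 * above concrete.  The data segment pointer stays NULL because the data
 * image is all zeros (see the #if 0 block above).  Kept under #if 0 so it
 * is never built; the variable name is made up for illustration.
 */
#if 0
static const struct fw_info tg3_fw_5701_a0_sketch = {
        .text_base      = TG3_FW_TEXT_ADDR,
        .text_len       = TG3_FW_TEXT_LEN,
        .text_data      = &tg3FwText[0],
        .rodata_base    = TG3_FW_RODATA_ADDR,
        .rodata_len     = TG3_FW_RODATA_LEN,
        .rodata_data    = &tg3FwRodata[0],
        .data_base      = TG3_FW_DATA_ADDR,
        .data_len       = TG3_FW_DATA_LEN,
        .data_data      = NULL,
};
#endif
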
6194 /* tp->lock is held. */
6195 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6196                                  int cpu_scratch_size, struct fw_info *info)
6197 {
6198         int err, lock_err, i;
6199         void (*write_op)(struct tg3 *, u32, u32);
6200
6201         if (cpu_base == TX_CPU_BASE &&
6202             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6203                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6204                        "TX cpu firmware on %s which is 5705-class.\n",
6205                        tp->dev->name);
6206                 return -EINVAL;
6207         }
6208
6209         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6210                 write_op = tg3_write_mem;
6211         else
6212                 write_op = tg3_write_indirect_reg32;
6213
6214         /* It is possible that bootcode is still loading at this point.
6215          * Get the nvram lock first before halting the cpu.
6216          */
6217         lock_err = tg3_nvram_lock(tp);
6218         err = tg3_halt_cpu(tp, cpu_base);
6219         if (!lock_err)
6220                 tg3_nvram_unlock(tp);
6221         if (err)
6222                 goto out;
6223
6224         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6225                 write_op(tp, cpu_scratch_base + i, 0);
6226         tw32(cpu_base + CPU_STATE, 0xffffffff);
6227         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
6228         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6229                 write_op(tp, (cpu_scratch_base +
6230                               (info->text_base & 0xffff) +
6231                               (i * sizeof(u32))),
6232                          (info->text_data ?
6233                           info->text_data[i] : 0));
6234         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6235                 write_op(tp, (cpu_scratch_base +
6236                               (info->rodata_base & 0xffff) +
6237                               (i * sizeof(u32))),
6238                          (info->rodata_data ?
6239                           info->rodata_data[i] : 0));
6240         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6241                 write_op(tp, (cpu_scratch_base +
6242                               (info->data_base & 0xffff) +
6243                               (i * sizeof(u32))),
6244                          (info->data_data ?
6245                           info->data_data[i] : 0));
6246
6247         err = 0;
6248
6249 out:
6250         return err;
6251 }
6252
6253 /* tp->lock is held. */
6254 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6255 {
6256         struct fw_info info;
6257         int err, i;
6258
6259         info.text_base = TG3_FW_TEXT_ADDR;
6260         info.text_len = TG3_FW_TEXT_LEN;
6261         info.text_data = &tg3FwText[0];
6262         info.rodata_base = TG3_FW_RODATA_ADDR;
6263         info.rodata_len = TG3_FW_RODATA_LEN;
6264         info.rodata_data = &tg3FwRodata[0];
6265         info.data_base = TG3_FW_DATA_ADDR;
6266         info.data_len = TG3_FW_DATA_LEN;
6267         info.data_data = NULL;
6268
6269         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6270                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6271                                     &info);
6272         if (err)
6273                 return err;
6274
6275         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6276                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6277                                     &info);
6278         if (err)
6279                 return err;
6280
6281         /* Now startup only the RX cpu. */
6282         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6283         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6284
6285         for (i = 0; i < 5; i++) {
6286                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6287                         break;
6288                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6289                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6290                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6291                 udelay(1000);
6292         }
6293         if (i >= 5) {
6294                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6295                        "to set RX CPU PC, is %08x should be %08x\n",
6296                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6297                        TG3_FW_TEXT_ADDR);
6298                 return -ENODEV;
6299         }
6300         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6301         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6302
6303         return 0;
6304 }
6305
6306
6307 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6308 #define TG3_TSO_FW_RELEASE_MINOR        0x6
6309 #define TG3_TSO_FW_RELEASE_FIX          0x0
6310 #define TG3_TSO_FW_START_ADDR           0x08000000
6311 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6312 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6313 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6314 #define TG3_TSO_FW_RODATA_LEN           0x60
6315 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6316 #define TG3_TSO_FW_DATA_LEN             0x30
6317 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6318 #define TG3_TSO_FW_SBSS_LEN             0x2c
6319 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6320 #define TG3_TSO_FW_BSS_LEN              0x894
6321
6322 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6323         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6324         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6325         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6326         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6327         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6328         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6329         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6330         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6331         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6332         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6333         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6334         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6335         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6336         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6337         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6338         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6339         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6340         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6341         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6342         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6343         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6344         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6345         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6346         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6347         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6348         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6349         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6350         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6351         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6352         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6353         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6354         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6355         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6356         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6357         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6358         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6359         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6360         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6361         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6362         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6363         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6364         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6365         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6366         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6367         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6368         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6369         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6370         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6371         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6372         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6373         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6374         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6375         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6376         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6377         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6378         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6379         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6380         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6381         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6382         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6383         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6384         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6385         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6386         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6387         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6388         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6389         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6390         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6391         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6392         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6393         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6394         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6395         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6396         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6397         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6398         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6399         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6400         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6401         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6402         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6403         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6404         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6405         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6406         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6407         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6408         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6409         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6410         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6411         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6412         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6413         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6414         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6415         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6416         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6417         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6418         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6419         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6420         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6421         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6422         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6423         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6424         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6425         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6426         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6427         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6428         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6429         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6430         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6431         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6432         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6433         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6434         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6435         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6436         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6437         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6438         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6439         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6440         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6441         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6442         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6443         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6444         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6445         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6446         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6447         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6448         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6449         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6450         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6451         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6452         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6453         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6454         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6455         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6456         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6457         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6458         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6459         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6460         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6461         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6462         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6463         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6464         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6465         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6466         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6467         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6468         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6469         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6470         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6471         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6472         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6473         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6474         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6475         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6476         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6477         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6478         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6479         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6480         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6481         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6482         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6483         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6484         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6485         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6486         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6487         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6488         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6489         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6490         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6491         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6492         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6493         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6494         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6495         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6496         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6497         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6498         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6499         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6500         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6501         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6502         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6503         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6504         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6505         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6506         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6507         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6508         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6509         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6510         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6511         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6512         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6513         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6514         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6515         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6516         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6517         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6518         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6519         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6520         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6521         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6522         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6523         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6524         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6525         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6526         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6527         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6528         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6529         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6530         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6531         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6532         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6533         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6534         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6535         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6536         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6537         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6538         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6539         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6540         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6541         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6542         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6543         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6544         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6545         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6546         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6547         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6548         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6549         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6550         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6551         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6552         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6553         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6554         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6555         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6556         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6557         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6558         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6559         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6560         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6561         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6562         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6563         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6564         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6565         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6566         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6567         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6568         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6569         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6570         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6571         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6572         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6573         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6574         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6575         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6576         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6577         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6578         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6579         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6580         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6581         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6582         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6583         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6584         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6585         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6586         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6587         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6588         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6589         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6590         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6591         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6592         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6593         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6594         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6595         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6596         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6597         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6598         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6599         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6600         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6601         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6602         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6603         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6604         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6605         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6606         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6607 };
6608
6609 static const u32 tg3TsoFwRodata[] = {
6610         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6611         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6612         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6613         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6614         0x00000000,
6615 };
6616
6617 static const u32 tg3TsoFwData[] = {
6618         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6619         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6620         0x00000000,
6621 };
6622
6623 /* 5705 needs a special version of the TSO firmware.  */
6624 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6625 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
6626 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6627 #define TG3_TSO5_FW_START_ADDR          0x00010000
6628 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6629 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6630 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6631 #define TG3_TSO5_FW_RODATA_LEN          0x50
6632 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6633 #define TG3_TSO5_FW_DATA_LEN            0x20
6634 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6635 #define TG3_TSO5_FW_SBSS_LEN            0x28
6636 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6637 #define TG3_TSO5_FW_BSS_LEN             0x88
6638
6639 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6640         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6641         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6642         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6643         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6644         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6645         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6646         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6647         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6648         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6649         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6650         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6651         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6652         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6653         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6654         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6655         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6656         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6657         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6658         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6659         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6660         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6661         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6662         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6663         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6664         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6665         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6666         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6667         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6668         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6669         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6670         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6671         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6672         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6673         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6674         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6675         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6676         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6677         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6678         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6679         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6680         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6681         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6682         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6683         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6684         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6685         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6686         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6687         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6688         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6689         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6690         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6691         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6692         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6693         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6694         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6695         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6696         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6697         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6698         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6699         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6700         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6701         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6702         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6703         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6704         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6705         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6706         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6707         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6708         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6709         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6710         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6711         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6712         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6713         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6714         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6715         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6716         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6717         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6718         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6719         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6720         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6721         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6722         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6723         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6724         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6725         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6726         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6727         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6728         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6729         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6730         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6731         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6732         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6733         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6734         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6735         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6736         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6737         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6738         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6739         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6740         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6741         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6742         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6743         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6744         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6745         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6746         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6747         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6748         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6749         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6750         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6751         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6752         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6753         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6754         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6755         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6756         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6757         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6758         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6759         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6760         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6761         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6762         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6763         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6764         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6765         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6766         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6767         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6768         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6769         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6770         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6771         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6772         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6773         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6774         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6775         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6776         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6777         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6778         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6779         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6780         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6781         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6782         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6783         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6784         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6785         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6786         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6787         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6788         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6789         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6790         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6791         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6792         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6793         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6794         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6795         0x00000000, 0x00000000, 0x00000000,
6796 };
6797
6798 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6799         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6800         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6801         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6802         0x00000000, 0x00000000, 0x00000000,
6803 };
6804
6805 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6806         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6807         0x00000000, 0x00000000, 0x00000000,
6808 };
6809
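/* The three arrays above describe the TSO firmware image as text, rodata
 * and data segments (load address, length and payload words).  The loader
 * below packs them into a struct fw_info and hands them to
 * tg3_load_firmware_cpu() for copying into the device CPU scratch area.
 */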
6810 /* tp->lock is held. */
6811 static int tg3_load_tso_firmware(struct tg3 *tp)
6812 {
6813         struct fw_info info;
6814         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6815         int err, i;
6816
6817         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6818                 return 0;
6819
6820         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6821                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6822                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6823                 info.text_data = &tg3Tso5FwText[0];
6824                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6825                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6826                 info.rodata_data = &tg3Tso5FwRodata[0];
6827                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6828                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6829                 info.data_data = &tg3Tso5FwData[0];
6830                 cpu_base = RX_CPU_BASE;
6831                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6832                 cpu_scratch_size = (info.text_len +
6833                                     info.rodata_len +
6834                                     info.data_len +
6835                                     TG3_TSO5_FW_SBSS_LEN +
6836                                     TG3_TSO5_FW_BSS_LEN);
6837         } else {
6838                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6839                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6840                 info.text_data = &tg3TsoFwText[0];
6841                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6842                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6843                 info.rodata_data = &tg3TsoFwRodata[0];
6844                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6845                 info.data_len = TG3_TSO_FW_DATA_LEN;
6846                 info.data_data = &tg3TsoFwData[0];
6847                 cpu_base = TX_CPU_BASE;
6848                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6849                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6850         }
6851
6852         err = tg3_load_firmware_cpu(tp, cpu_base,
6853                                     cpu_scratch_base, cpu_scratch_size,
6854                                     &info);
6855         if (err)
6856                 return err;
6857
6858         /* Now startup the cpu. */
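        /* Point the CPU program counter at the firmware entry point and
         * check that it latched; if not, halt the CPU, rewrite the PC and
         * retry up to five times at one-millisecond intervals.
         */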
6859         tw32(cpu_base + CPU_STATE, 0xffffffff);
6860         tw32_f(cpu_base + CPU_PC,    info.text_base);
6861
6862         for (i = 0; i < 5; i++) {
6863                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6864                         break;
6865                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6866                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6867                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6868                 udelay(1000);
6869         }
6870         if (i >= 5) {
6871                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
6872                        "CPU PC for %s: is %08x, should be %08x\n",
6873                        tp->dev->name, tr32(cpu_base + CPU_PC),
6874                        info.text_base);
6875                 return -ENODEV;
6876         }
6877         tw32(cpu_base + CPU_STATE, 0xffffffff);
6878         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6879         return 0;
6880 }
6881
6882
6883 /* tp->lock is held. */
6884 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6885 {
6886         u32 addr_high, addr_low;
6887         int i;
6888
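        /* The address is split across two registers: the high word holds
         * bytes 0-1 and the low word bytes 2-5.  As an illustration, a
         * hypothetical address 00:10:18:aa:bb:cc would be written as
         * addr_high = 0x00000010 and addr_low = 0x18aabbcc.
         */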
6889         addr_high = ((tp->dev->dev_addr[0] << 8) |
6890                      tp->dev->dev_addr[1]);
6891         addr_low = ((tp->dev->dev_addr[2] << 24) |
6892                     (tp->dev->dev_addr[3] << 16) |
6893                     (tp->dev->dev_addr[4] <<  8) |
6894                     (tp->dev->dev_addr[5] <<  0));
6895         for (i = 0; i < 4; i++) {
6896                 if (i == 1 && skip_mac_1)
6897                         continue;
6898                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6899                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6900         }
6901
6902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6904                 for (i = 0; i < 12; i++) {
6905                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6906                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6907                 }
6908         }
6909
6910         addr_high = (tp->dev->dev_addr[0] +
6911                      tp->dev->dev_addr[1] +
6912                      tp->dev->dev_addr[2] +
6913                      tp->dev->dev_addr[3] +
6914                      tp->dev->dev_addr[4] +
6915                      tp->dev->dev_addr[5]) &
6916                 TX_BACKOFF_SEED_MASK;
6917         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6918 }
6919
6920 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6921 {
6922         struct tg3 *tp = netdev_priv(dev);
6923         struct sockaddr *addr = p;
6924         int err = 0, skip_mac_1 = 0;
6925
6926         if (!is_valid_ether_addr(addr->sa_data))
6927                 return -EINVAL;
6928
6929         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6930
6931         if (!netif_running(dev))
6932                 return 0;
6933
6934         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6935                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6936
6937                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6938                 addr0_low = tr32(MAC_ADDR_0_LOW);
6939                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6940                 addr1_low = tr32(MAC_ADDR_1_LOW);
6941
6942                 /* Skip MAC addr 1 if ASF is using it. */
6943                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6944                     !(addr1_high == 0 && addr1_low == 0))
6945                         skip_mac_1 = 1;
6946         }
6947         spin_lock_bh(&tp->lock);
6948         __tg3_set_mac_addr(tp, skip_mac_1);
6949         spin_unlock_bh(&tp->lock);
6950
6951         return err;
6952 }
6953
6954 /* tp->lock is held. */
6955 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6956                            dma_addr_t mapping, u32 maxlen_flags,
6957                            u32 nic_addr)
6958 {
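        /* A TG3_BDINFO block in NIC SRAM holds the ring's 64-bit host DMA
         * address (written as separate high and low words), a combined
         * max-length/flags word and, on pre-5705 chips only, the ring's
         * location in NIC SRAM.
         */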
6959         tg3_write_mem(tp,
6960                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6961                       ((u64) mapping >> 32));
6962         tg3_write_mem(tp,
6963                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6964                       ((u64) mapping & 0xffffffff));
6965         tg3_write_mem(tp,
6966                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6967                        maxlen_flags);
6968
6969         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6970                 tg3_write_mem(tp,
6971                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6972                               nic_addr);
6973 }
6974
6975 static void __tg3_set_rx_mode(struct net_device *);
6976 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6977 {
6978         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6979         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6980         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6981         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6982         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6983                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6984                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6985         }
6986         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6987         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6988         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6989                 u32 val = ec->stats_block_coalesce_usecs;
6990
6991                 if (!netif_carrier_ok(tp->dev))
6992                         val = 0;
6993
6994                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6995         }
6996 }
6997
6998 /* tp->lock is held. */
6999 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7000 {
7001         u32 val, rdmac_mode;
7002         int i, err, limit;
7003
7004         tg3_disable_ints(tp);
7005
7006         tg3_stop_fw(tp);
7007
7008         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7009
7010         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7011                 tg3_abort_hw(tp, 1);
7012         }
7013
7014         if (reset_phy &&
7015             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7016                 tg3_phy_reset(tp);
7017
7018         err = tg3_chip_reset(tp);
7019         if (err)
7020                 return err;
7021
7022         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7023
7024         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7025             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7026                 val = tr32(TG3_CPMU_CTRL);
7027                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7028                 tw32(TG3_CPMU_CTRL, val);
7029
7030                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7031                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7032                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7033                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7034
7035                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7036                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7037                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7038                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7039
7040                 val = tr32(TG3_CPMU_HST_ACC);
7041                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7042                 val |= CPMU_HST_ACC_MACCLK_6_25;
7043                 tw32(TG3_CPMU_HST_ACC, val);
7044         }
7045
7046         /* This works around an issue with Athlon chipsets on
7047          * B3 tigon3 silicon.  This bit has no effect on any
7048          * other revision.  But do not set this on PCI Express
7049          * chips and don't even touch the clocks if the CPMU is present.
7050          */
7051         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7052                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7053                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7054                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7055         }
7056
7057         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7058             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7059                 val = tr32(TG3PCI_PCISTATE);
7060                 val |= PCISTATE_RETRY_SAME_DMA;
7061                 tw32(TG3PCI_PCISTATE, val);
7062         }
7063
7064         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7065                 /* Allow reads and writes to the
7066                  * APE register and memory space.
7067                  */
7068                 val = tr32(TG3PCI_PCISTATE);
7069                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7070                        PCISTATE_ALLOW_APE_SHMEM_WR;
7071                 tw32(TG3PCI_PCISTATE, val);
7072         }
7073
7074         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7075                 /* Enable some hw fixes.  */
7076                 val = tr32(TG3PCI_MSI_DATA);
7077                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7078                 tw32(TG3PCI_MSI_DATA, val);
7079         }
7080
7081         /* Descriptor ring init may make accesses to the
7082          * NIC SRAM area to setup the TX descriptors, so we
7083          * can only do this after the hardware has been
7084          * successfully reset.
7085          */
7086         err = tg3_init_rings(tp);
7087         if (err)
7088                 return err;
7089
7090         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7091             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7092             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7093                 /* This value is determined during the probe time DMA
7094                  * engine test, tg3_test_dma.
7095                  */
7096                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7097         }
7098
7099         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7100                           GRC_MODE_4X_NIC_SEND_RINGS |
7101                           GRC_MODE_NO_TX_PHDR_CSUM |
7102                           GRC_MODE_NO_RX_PHDR_CSUM);
7103         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7104
7105         /* Pseudo-header checksum is done by hardware logic and not
7106          * the offload processors, so make the chip do the pseudo-
7107          * header checksums on receive.  For transmit it is more
7108          * convenient to do the pseudo-header checksum in software
7109          * as Linux does that on transmit for us in all cases.
7110          */
7111         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7112
7113         tw32(GRC_MODE,
7114              tp->grc_mode |
7115              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7116
7117         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
7118         val = tr32(GRC_MISC_CFG);
7119         val &= ~0xff;
7120         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7121         tw32(GRC_MISC_CFG, val);
7122
7123         /* Initialize MBUF/DESC pool. */
7124         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7125                 /* Do nothing.  */
7126         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7127                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7128                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7129                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7130                 else
7131                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7132                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7133                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7134         }
7135         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7136                 int fw_len;
7137
7138                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7139                           TG3_TSO5_FW_RODATA_LEN +
7140                           TG3_TSO5_FW_DATA_LEN +
7141                           TG3_TSO5_FW_SBSS_LEN +
7142                           TG3_TSO5_FW_BSS_LEN);
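                /* Round the firmware footprint up to a 128-byte (0x80)
                 * boundary before carving it out of the 5705 mbuf pool.
                 */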
7143                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7144                 tw32(BUFMGR_MB_POOL_ADDR,
7145                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7146                 tw32(BUFMGR_MB_POOL_SIZE,
7147                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7148         }
7149
7150         if (tp->dev->mtu <= ETH_DATA_LEN) {
7151                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7152                      tp->bufmgr_config.mbuf_read_dma_low_water);
7153                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7154                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7155                 tw32(BUFMGR_MB_HIGH_WATER,
7156                      tp->bufmgr_config.mbuf_high_water);
7157         } else {
7158                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7159                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7160                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7161                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7162                 tw32(BUFMGR_MB_HIGH_WATER,
7163                      tp->bufmgr_config.mbuf_high_water_jumbo);
7164         }
7165         tw32(BUFMGR_DMA_LOW_WATER,
7166              tp->bufmgr_config.dma_low_water);
7167         tw32(BUFMGR_DMA_HIGH_WATER,
7168              tp->bufmgr_config.dma_high_water);
7169
7170         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7171         for (i = 0; i < 2000; i++) {
7172                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7173                         break;
7174                 udelay(10);
7175         }
7176         if (i >= 2000) {
7177                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7178                        tp->dev->name);
7179                 return -ENODEV;
7180         }
7181
7182         /* Setup replenish threshold. */
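        /* The threshold is roughly one eighth of the posted standard ring
         * entries, clamped to at least 1 and to rx_std_max_post; the 5906
         * branch below also caps it at half of the internal ring size.
         */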
7183         val = tp->rx_pending / 8;
7184         if (val == 0)
7185                 val = 1;
7186         else if (val > tp->rx_std_max_post)
7187                 val = tp->rx_std_max_post;
7188         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7189                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7190                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7191
7192                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7193                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7194         }
7195
7196         tw32(RCVBDI_STD_THRESH, val);
7197
7198         /* Initialize TG3_BDINFO's at:
7199          *  RCVDBDI_STD_BD:     standard eth size rx ring
7200          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7201          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7202          *
7203          * like so:
7204          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7205          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7206          *                              ring attribute flags
7207          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7208          *
7209          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7210          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7211          *
7212          * The size of each ring is fixed in the firmware, but the location is
7213          * configurable.
7214          */
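        /* Per the layout above, the MAXLEN_FLAGS word carries the maximum
         * rx buffer size in its upper 16 bits with attribute flags in the
         * low bits; BDINFO_FLAGS_DISABLED is used below to mark the mini
         * and (when not enabled) jumbo rings as unused.
         */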
7215         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7216              ((u64) tp->rx_std_mapping >> 32));
7217         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7218              ((u64) tp->rx_std_mapping & 0xffffffff));
7219         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7220              NIC_SRAM_RX_BUFFER_DESC);
7221
7222         /* Don't even try to program the JUMBO/MINI buffer descriptor
7223          * configs on 5705.
7224          */
7225         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7226                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7227                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7228         } else {
7229                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7230                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7231
7232                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7233                      BDINFO_FLAGS_DISABLED);
7234
7235                 /* Setup replenish threshold. */
7236                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7237
7238                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7239                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7240                              ((u64) tp->rx_jumbo_mapping >> 32));
7241                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7242                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7243                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7244                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7245                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7246                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7247                 } else {
7248                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7249                              BDINFO_FLAGS_DISABLED);
7250                 }
7251
7252         }
7253
7254         /* There is only one send ring on 5705/5750, no need to explicitly
7255          * disable the others.
7256          */
7257         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7258                 /* Clear out send RCB ring in SRAM. */
7259                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7260                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7261                                       BDINFO_FLAGS_DISABLED);
7262         }
7263
7264         tp->tx_prod = 0;
7265         tp->tx_cons = 0;
7266         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7267         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7268
7269         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7270                        tp->tx_desc_mapping,
7271                        (TG3_TX_RING_SIZE <<
7272                         BDINFO_FLAGS_MAXLEN_SHIFT),
7273                        NIC_SRAM_TX_BUFFER_DESC);
7274
7275         /* There is only one receive return ring on 5705/5750, no need
7276          * to explicitly disable the others.
7277          */
7278         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7279                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7280                      i += TG3_BDINFO_SIZE) {
7281                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7282                                       BDINFO_FLAGS_DISABLED);
7283                 }
7284         }
7285
7286         tp->rx_rcb_ptr = 0;
7287         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7288
7289         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7290                        tp->rx_rcb_mapping,
7291                        (TG3_RX_RCB_RING_SIZE(tp) <<
7292                         BDINFO_FLAGS_MAXLEN_SHIFT),
7293                        0);
7294
7295         tp->rx_std_ptr = tp->rx_pending;
7296         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7297                      tp->rx_std_ptr);
7298
7299         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7300                                                 tp->rx_jumbo_pending : 0;
7301         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7302                      tp->rx_jumbo_ptr);
7303
7304         /* Initialize MAC address and backoff seed. */
7305         __tg3_set_mac_addr(tp, 0);
7306
7307         /* MTU + ethernet header + FCS + optional VLAN tag */
7308         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
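        /* With ETH_HLEN == 14, a standard 1500-byte MTU gives a limit of
         * 1500 + 14 + 8 = 1522 bytes: header, 4-byte FCS and one 4-byte
         * VLAN tag.
         */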
7309
7310         /* The slot time is changed by tg3_setup_phy if we
7311          * run at gigabit with half duplex.
7312          */
7313         tw32(MAC_TX_LENGTHS,
7314              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7315              (6 << TX_LENGTHS_IPG_SHIFT) |
7316              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7317
7318         /* Receive rules. */
7319         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7320         tw32(RCVLPC_CONFIG, 0x0181);
7321
7322         /* Calculate RDMAC_MODE setting early, we need it to determine
7323          * the RCVLPC_STATE_ENABLE mask.
7324          */
7325         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7326                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7327                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7328                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7329                       RDMAC_MODE_LNGREAD_ENAB);
7330
7331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7332             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7333                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7334                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7335                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7336
7337         /* If statement applies to 5705 and 5750 PCI devices only */
7338         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7339              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7340             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7341                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7342                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7343                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7344                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7345                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7346                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7347                 }
7348         }
7349
7350         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7351                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7352
7353         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7354                 rdmac_mode |= (1 << 27);
7355
7356         /* Receive/send statistics. */
7357         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7358                 val = tr32(RCVLPC_STATS_ENABLE);
7359                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7360                 tw32(RCVLPC_STATS_ENABLE, val);
7361         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7362                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7363                 val = tr32(RCVLPC_STATS_ENABLE);
7364                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7365                 tw32(RCVLPC_STATS_ENABLE, val);
7366         } else {
7367                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7368         }
7369         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7370         tw32(SNDDATAI_STATSENAB, 0xffffff);
7371         tw32(SNDDATAI_STATSCTRL,
7372              (SNDDATAI_SCTRL_ENABLE |
7373               SNDDATAI_SCTRL_FASTUPD));
7374
7375         /* Setup host coalescing engine. */
7376         tw32(HOSTCC_MODE, 0);
7377         for (i = 0; i < 2000; i++) {
7378                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7379                         break;
7380                 udelay(10);
7381         }
7382
7383         __tg3_set_coalesce(tp, &tp->coal);
7384
7385         /* set status block DMA address */
7386         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7387              ((u64) tp->status_mapping >> 32));
7388         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7389              ((u64) tp->status_mapping & 0xffffffff));
7390
7391         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7392                 /* Status/statistics block address.  See tg3_timer,
7393                  * the tg3_periodic_fetch_stats call there, and
7394                  * tg3_get_stats to see how this works for 5705/5750 chips.
7395                  */
7396                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7397                      ((u64) tp->stats_mapping >> 32));
7398                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7399                      ((u64) tp->stats_mapping & 0xffffffff));
7400                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7401                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7402         }
7403
7404         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7405
7406         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7407         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7408         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7409                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7410
7411         /* Clear statistics/status block in chip, and status block in ram. */
7412         for (i = NIC_SRAM_STATS_BLK;
7413              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7414              i += sizeof(u32)) {
7415                 tg3_write_mem(tp, i, 0);
7416                 udelay(40);
7417         }
7418         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7419
7420         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7421                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7422                 /* reset to prevent losing 1st rx packet intermittently */
7423                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7424                 udelay(10);
7425         }
7426
7427         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7428                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7429         else
7430                 tp->mac_mode = 0;
7431         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7432                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7433         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7434             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7435             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7436                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7437         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7438         udelay(40);
7439
7440         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7441          * If TG3_FLG2_IS_NIC is zero, we should read the
7442          * register to preserve the GPIO settings for LOMs. The GPIOs,
7443          * whether used as inputs or outputs, are set by boot code after
7444          * reset.
7445          */
7446         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7447                 u32 gpio_mask;
7448
7449                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7450                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7451                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7452
7453                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7454                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7455                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7456
7457                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7458                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7459
7460                 tp->grc_local_ctrl &= ~gpio_mask;
7461                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
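                /* Merge in the GPIO OE/OUTPUT bits read back from the chip
                 * so the GRC_LOCAL_CTRL write below re-asserts whatever the
                 * boot code configured (e.g. for LOMs) rather than
                 * overwriting it.
                 */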
7462
7463                 /* GPIO1 must be driven high for eeprom write protect */
7464                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7465                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7466                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7467         }
7468         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7469         udelay(100);
7470
7471         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7472         tp->last_tag = 0;
7473
7474         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7475                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7476                 udelay(40);
7477         }
7478
7479         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7480                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7481                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7482                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7483                WDMAC_MODE_LNGREAD_ENAB);
7484
7485         /* If statement applies to 5705 and 5750 PCI devices only */
7486         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7487              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7488             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7489                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7490                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7491                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7492                         /* nothing */
7493                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7494                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7495                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7496                         val |= WDMAC_MODE_RX_ACCEL;
7497                 }
7498         }
7499
7500         /* Enable host coalescing bug fix */
7501         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7502             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7503             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7504             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7505             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7506                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7507
7508         tw32_f(WDMAC_MODE, val);
7509         udelay(40);
7510
7511         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7512                 u16 pcix_cmd;
7513
7514                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7515                                      &pcix_cmd);
7516                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7517                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7518                         pcix_cmd |= PCI_X_CMD_READ_2K;
7519                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7520                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7521                         pcix_cmd |= PCI_X_CMD_READ_2K;
7522                 }
7523                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7524                                       pcix_cmd);
7525         }
7526
7527         tw32_f(RDMAC_MODE, rdmac_mode);
7528         udelay(40);
7529
7530         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7531         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7532                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7533
7534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7535                 tw32(SNDDATAC_MODE,
7536                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7537         else
7538                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7539
7540         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7541         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7542         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7543         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7544         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7545                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7546         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7547         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7548
7549         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7550                 err = tg3_load_5701_a0_firmware_fix(tp);
7551                 if (err)
7552                         return err;
7553         }
7554
7555         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7556                 err = tg3_load_tso_firmware(tp);
7557                 if (err)
7558                         return err;
7559         }
7560
7561         tp->tx_mode = TX_MODE_ENABLE;
7562         tw32_f(MAC_TX_MODE, tp->tx_mode);
7563         udelay(100);
7564
7565         tp->rx_mode = RX_MODE_ENABLE;
7566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7568             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7570                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7571
7572         tw32_f(MAC_RX_MODE, tp->rx_mode);
7573         udelay(10);
7574
7575         tw32(MAC_LED_CTRL, tp->led_ctrl);
7576
7577         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7578         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7579                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7580                 udelay(10);
7581         }
7582         tw32_f(MAC_RX_MODE, tp->rx_mode);
7583         udelay(10);
7584
7585         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7586                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7587                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7588                         /* Set drive transmission level to 1.2V  */
7589                         /* only if the signal pre-emphasis bit is not set  */
7590                         val = tr32(MAC_SERDES_CFG);
7591                         val &= 0xfffff000;
7592                         val |= 0x880;
7593                         tw32(MAC_SERDES_CFG, val);
7594                 }
7595                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7596                         tw32(MAC_SERDES_CFG, 0x616000);
7597         }
7598
7599         /* Prevent chip from dropping frames when flow control
7600          * is enabled.
7601          */
7602         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7603
7604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7605             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7606                 /* Use hardware link auto-negotiation */
7607                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7608         }
7609
7610         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7611             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7612                 u32 tmp;
7613
7614                 tmp = tr32(SERDES_RX_CTRL);
7615                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7616                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7617                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7618                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7619         }
7620
7621         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7622                 if (tp->link_config.phy_is_low_power) {
7623                         tp->link_config.phy_is_low_power = 0;
7624                         tp->link_config.speed = tp->link_config.orig_speed;
7625                         tp->link_config.duplex = tp->link_config.orig_duplex;
7626                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7627                 }
7628
7629                 err = tg3_setup_phy(tp, 0);
7630                 if (err)
7631                         return err;
7632
7633                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7634                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7635                         u32 tmp;
7636
7637                         /* Clear CRC stats. */
7638                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7639                                 tg3_writephy(tp, MII_TG3_TEST1,
7640                                              tmp | MII_TG3_TEST1_CRC_EN);
7641                                 tg3_readphy(tp, 0x14, &tmp);
7642                         }
7643                 }
7644         }
7645
7646         __tg3_set_rx_mode(tp->dev);
7647
7648         /* Initialize receive rules. */
7649         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7650         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7651         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7652         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7653
7654         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7655             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7656                 limit = 8;
7657         else
7658                 limit = 16;
7659         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7660                 limit -= 4;
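        /* The cases below fall through intentionally: starting at 'limit',
         * each unused higher-numbered receive rule/value register pair is
         * cleared (rules 2 and 3 are deliberately left untouched).
         */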
7661         switch (limit) {
7662         case 16:
7663                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7664         case 15:
7665                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7666         case 14:
7667                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7668         case 13:
7669                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7670         case 12:
7671                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7672         case 11:
7673                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7674         case 10:
7675                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7676         case 9:
7677                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7678         case 8:
7679                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7680         case 7:
7681                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7682         case 6:
7683                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7684         case 5:
7685                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7686         case 4:
7687                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7688         case 3:
7689                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7690         case 2:
7691         case 1:
7692
7693         default:
7694                 break;
7695         }
7696
7697         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7698                 /* Write our heartbeat update interval to APE (heartbeat disabled). */
7699                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7700                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7701
7702         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7703
7704         return 0;
7705 }
7706
7707 /* Called at device open time to get the chip ready for
7708  * packet processing.  Invoked with tp->lock held.
7709  */
7710 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7711 {
7712         tg3_switch_clocks(tp);
7713
7714         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7715
7716         return tg3_reset_hw(tp, reset_phy);
7717 }
7718
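/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter.  The unsigned comparison on ->low detects when the running
 * sum wrapped so the carry can be added to the high word.
 */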
7719 #define TG3_STAT_ADD32(PSTAT, REG) \
7720 do {    u32 __val = tr32(REG); \
7721         (PSTAT)->low += __val; \
7722         if ((PSTAT)->low < __val) \
7723                 (PSTAT)->high += 1; \
7724 } while (0)
7725
7726 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7727 {
7728         struct tg3_hw_stats *sp = tp->hw_stats;
7729
7730         if (!netif_carrier_ok(tp->dev))
7731                 return;
7732
7733         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7734         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7735         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7736         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7737         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7738         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7739         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7740         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7741         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7742         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7743         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7744         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7745         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7746
7747         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7748         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7749         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7750         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7751         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7752         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7753         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7754         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7755         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7756         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7757         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7758         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7759         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7760         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7761
7762         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7763         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7764         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7765 }
7766
7767 static void tg3_timer(unsigned long __opaque)
7768 {
7769         struct tg3 *tp = (struct tg3 *) __opaque;
7770
7771         if (tp->irq_sync)
7772                 goto restart_timer;
7773
7774         spin_lock(&tp->lock);
7775
7776         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7777                 /* All of this is needed because, when using non-tagged
7778                  * IRQ status, the mailbox/status_block protocol the chip
7779                  * uses with the CPU is race prone.
7780                  */
7781                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7782                         tw32(GRC_LOCAL_CTRL,
7783                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7784                 } else {
7785                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7786                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7787                 }
7788
7789                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7790                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7791                         spin_unlock(&tp->lock);
7792                         schedule_work(&tp->reset_task);
7793                         return;
7794                 }
7795         }
7796
7797         /* This part only runs once per second. */
7798         if (!--tp->timer_counter) {
7799                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7800                         tg3_periodic_fetch_stats(tp);
7801
7802                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7803                         u32 mac_stat;
7804                         int phy_event;
7805
7806                         mac_stat = tr32(MAC_STATUS);
7807
7808                         phy_event = 0;
7809                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7810                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7811                                         phy_event = 1;
7812                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7813                                 phy_event = 1;
7814
7815                         if (phy_event)
7816                                 tg3_setup_phy(tp, 0);
7817                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7818                         u32 mac_stat = tr32(MAC_STATUS);
7819                         int need_setup = 0;
7820
7821                         if (netif_carrier_ok(tp->dev) &&
7822                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7823                                 need_setup = 1;
7824                         }
7825                         if (!netif_carrier_ok(tp->dev) &&
7826                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7827                                          MAC_STATUS_SIGNAL_DET))) {
7828                                 need_setup = 1;
7829                         }
7830                         if (need_setup) {
7831                                 if (!tp->serdes_counter) {
7832                                         tw32_f(MAC_MODE,
7833                                              (tp->mac_mode &
7834                                               ~MAC_MODE_PORT_MODE_MASK));
7835                                         udelay(40);
7836                                         tw32_f(MAC_MODE, tp->mac_mode);
7837                                         udelay(40);
7838                                 }
7839                                 tg3_setup_phy(tp, 0);
7840                         }
7841                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7842                         tg3_serdes_parallel_detect(tp);
7843
7844                 tp->timer_counter = tp->timer_multiplier;
7845         }
7846
7847         /* Heartbeat is only sent once every 2 seconds.
7848          *
7849          * The heartbeat is to tell the ASF firmware that the host
7850          * driver is still alive.  In the event that the OS crashes,
7851          * ASF needs to reset the hardware to free up the FIFO space
7852          * that may be filled with rx packets destined for the host.
7853          * If the FIFO is full, ASF will no longer function properly.
7854          *
7855          * Unintended resets have been reported on real time kernels
7856                  * where the timer doesn't run on time.  Netpoll will also
7857                  * have the same problem.
7858          *
7859          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7860          * to check the ring condition when the heartbeat is expiring
7861          * before doing the reset.  This will prevent most unintended
7862          * resets.
7863          */
7864         if (!--tp->asf_counter) {
7865                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7866                         u32 val;
7867
7868                         tg3_wait_for_event_ack(tp);
7869
7870                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7871                                       FWCMD_NICDRV_ALIVE3);
7872                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7873                         /* 5 second timeout */
7874                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7875                         val = tr32(GRC_RX_CPU_EVENT);
7876                         val |= GRC_RX_CPU_DRIVER_EVENT;
7877                         tw32_f(GRC_RX_CPU_EVENT, val);
7878                 }
7879                 tp->asf_counter = tp->asf_multiplier;
7880         }
7881
7882         spin_unlock(&tp->lock);
7883
7884 restart_timer:
7885         tp->timer.expires = jiffies + tp->timer_offset;
7886         add_timer(&tp->timer);
7887 }
7888
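/* Select the interrupt handler variant that matches the current delivery
 * mode (plain INTx, tagged-status INTx, MSI, or one-shot MSI) and request
 * the IRQ line with it.
 */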
7889 static int tg3_request_irq(struct tg3 *tp)
7890 {
7891         irq_handler_t fn;
7892         unsigned long flags;
7893         struct net_device *dev = tp->dev;
7894
7895         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7896                 fn = tg3_msi;
7897                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7898                         fn = tg3_msi_1shot;
7899                 flags = IRQF_SAMPLE_RANDOM;
7900         } else {
7901                 fn = tg3_interrupt;
7902                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7903                         fn = tg3_interrupt_tagged;
7904                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7905         }
7906         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7907 }
7908
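/* Verify that the chip can actually deliver an interrupt: temporarily
 * install a test ISR, force a host-coalescing "now" event, and poll the
 * interrupt mailbox (and the PCI interrupt-mask bit) for up to roughly
 * 50 ms before restoring the normal handler.
 */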
7909 static int tg3_test_interrupt(struct tg3 *tp)
7910 {
7911         struct net_device *dev = tp->dev;
7912         int err, i, intr_ok = 0;
7913
7914         if (!netif_running(dev))
7915                 return -ENODEV;
7916
7917         tg3_disable_ints(tp);
7918
7919         free_irq(tp->pdev->irq, dev);
7920
7921         err = request_irq(tp->pdev->irq, tg3_test_isr,
7922                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7923         if (err)
7924                 return err;
7925
7926         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7927         tg3_enable_ints(tp);
7928
7929         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7930                HOSTCC_MODE_NOW);
7931
7932         for (i = 0; i < 5; i++) {
7933                 u32 int_mbox, misc_host_ctrl;
7934
7935                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7936                                         TG3_64BIT_REG_LOW);
7937                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7938
7939                 if ((int_mbox != 0) ||
7940                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7941                         intr_ok = 1;
7942                         break;
7943                 }
7944
7945                 msleep(10);
7946         }
7947
7948         tg3_disable_ints(tp);
7949
7950         free_irq(tp->pdev->irq, dev);
7951
7952         err = tg3_request_irq(tp);
7953
7954         if (err)
7955                 return err;
7956
7957         if (intr_ok)
7958                 return 0;
7959
7960         return -EIO;
7961 }
7962
7963 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
7964  * INTx mode is successfully restored.
7965  */
7966 static int tg3_test_msi(struct tg3 *tp)
7967 {
7968         struct net_device *dev = tp->dev;
7969         int err;
7970         u16 pci_cmd;
7971
7972         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7973                 return 0;
7974
7975         /* Turn off SERR reporting in case MSI terminates with Master
7976          * Abort.
7977          */
7978         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7979         pci_write_config_word(tp->pdev, PCI_COMMAND,
7980                               pci_cmd & ~PCI_COMMAND_SERR);
7981
7982         err = tg3_test_interrupt(tp);
7983
7984         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7985
7986         if (!err)
7987                 return 0;
7988
7989         /* other failures */
7990         if (err != -EIO)
7991                 return err;
7992
7993         /* MSI test failed, go back to INTx mode */
7994         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7995                "switching to INTx mode. Please report this failure to "
7996                "the PCI maintainer and include system chipset information.\n",
7997                        tp->dev->name);
7998
7999         free_irq(tp->pdev->irq, dev);
8000         pci_disable_msi(tp->pdev);
8001
8002         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8003
8004         err = tg3_request_irq(tp);
8005         if (err)
8006                 return err;
8007
8008         /* Need to reset the chip because the MSI cycle may have terminated
8009          * with Master Abort.
8010          */
8011         tg3_full_lock(tp, 1);
8012
8013         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8014         err = tg3_init_hw(tp, 1);
8015
8016         tg3_full_unlock(tp);
8017
8018         if (err)
8019                 free_irq(tp->pdev->irq, dev);
8020
8021         return err;
8022 }
8023
8024 static int tg3_open(struct net_device *dev)
8025 {
8026         struct tg3 *tp = netdev_priv(dev);
8027         int err;
8028
8029         netif_carrier_off(tp->dev);
8030
8031         err = tg3_set_power_state(tp, PCI_D0);
8032         if (err)
8033                 return err;
8034
8035         tg3_full_lock(tp, 0);
8036
8037         tg3_disable_ints(tp);
8038         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8039
8040         tg3_full_unlock(tp);
8041
8042         /* The placement of this call is tied
8043          * to the setup and use of Host TX descriptors.
8044          */
8045         err = tg3_alloc_consistent(tp);
8046         if (err)
8047                 return err;
8048
8049         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8050                 /* All MSI supporting chips should support tagged
8051                  * status.  Assert that this is the case.
8052                  */
8053                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8054                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8055                                "Not using MSI.\n", tp->dev->name);
8056                 } else if (pci_enable_msi(tp->pdev) == 0) {
8057                         u32 msi_mode;
8058
8059                         msi_mode = tr32(MSGINT_MODE);
8060                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8061                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8062                 }
8063         }
8064         err = tg3_request_irq(tp);
8065
8066         if (err) {
8067                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8068                         pci_disable_msi(tp->pdev);
8069                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8070                 }
8071                 tg3_free_consistent(tp);
8072                 return err;
8073         }
8074
8075         napi_enable(&tp->napi);
8076
8077         tg3_full_lock(tp, 0);
8078
8079         err = tg3_init_hw(tp, 1);
8080         if (err) {
8081                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8082                 tg3_free_rings(tp);
8083         } else {
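                /* With tagged status the driver timer only needs to run
                 * once a second; otherwise it runs at 10 Hz to cope with
                 * the race-prone non-tagged status-block protocol.  The
                 * counters are scaled so the once-per-second work and the
                 * 2-second ASF heartbeat still fire at the intended rate.
                 */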
8084                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8085                         tp->timer_offset = HZ;
8086                 else
8087                         tp->timer_offset = HZ / 10;
8088
8089                 BUG_ON(tp->timer_offset > HZ);
8090                 tp->timer_counter = tp->timer_multiplier =
8091                         (HZ / tp->timer_offset);
8092                 tp->asf_counter = tp->asf_multiplier =
8093                         ((HZ / tp->timer_offset) * 2);
8094
8095                 init_timer(&tp->timer);
8096                 tp->timer.expires = jiffies + tp->timer_offset;
8097                 tp->timer.data = (unsigned long) tp;
8098                 tp->timer.function = tg3_timer;
8099         }
8100
8101         tg3_full_unlock(tp);
8102
8103         if (err) {
8104                 napi_disable(&tp->napi);
8105                 free_irq(tp->pdev->irq, dev);
8106                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8107                         pci_disable_msi(tp->pdev);
8108                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8109                 }
8110                 tg3_free_consistent(tp);
8111                 return err;
8112         }
8113
8114         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8115                 err = tg3_test_msi(tp);
8116
8117                 if (err) {
8118                         tg3_full_lock(tp, 0);
8119
8120                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8121                                 pci_disable_msi(tp->pdev);
8122                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8123                         }
8124                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8125                         tg3_free_rings(tp);
8126                         tg3_free_consistent(tp);
8127
8128                         tg3_full_unlock(tp);
8129
8130                         napi_disable(&tp->napi);
8131
8132                         return err;
8133                 }
8134
8135                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8136                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8137                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
8138
8139                                 tw32(PCIE_TRANSACTION_CFG,
8140                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
8141                         }
8142                 }
8143         }
8144
8145         tg3_phy_start(tp);
8146
8147         tg3_full_lock(tp, 0);
8148
8149         add_timer(&tp->timer);
8150         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8151         tg3_enable_ints(tp);
8152
8153         tg3_full_unlock(tp);
8154
8155         netif_start_queue(dev);
8156
8157         return 0;
8158 }
8159
8160 #if 0
8161 /*static*/ void tg3_dump_state(struct tg3 *tp)
8162 {
8163         u32 val32, val32_2, val32_3, val32_4, val32_5;
8164         u16 val16;
8165         int i;
8166
8167         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8168         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8169         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8170                val16, val32);
8171
8172         /* MAC block */
8173         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8174                tr32(MAC_MODE), tr32(MAC_STATUS));
8175         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8176                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8177         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8178                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8179         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8180                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8181
8182         /* Send data initiator control block */
8183         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8184                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8185         printk("       SNDDATAI_STATSCTRL[%08x]\n",
8186                tr32(SNDDATAI_STATSCTRL));
8187
8188         /* Send data completion control block */
8189         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8190
8191         /* Send BD ring selector block */
8192         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8193                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8194
8195         /* Send BD initiator control block */
8196         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8197                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8198
8199         /* Send BD completion control block */
8200         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8201
8202         /* Receive list placement control block */
8203         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8204                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8205         printk("       RCVLPC_STATSCTRL[%08x]\n",
8206                tr32(RCVLPC_STATSCTRL));
8207
8208         /* Receive data and receive BD initiator control block */
8209         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8210                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8211
8212         /* Receive data completion control block */
8213         printk("DEBUG: RCVDCC_MODE[%08x]\n",
8214                tr32(RCVDCC_MODE));
8215
8216         /* Receive BD initiator control block */
8217         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8218                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8219
8220         /* Receive BD completion control block */
8221         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8222                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8223
8224         /* Receive list selector control block */
8225         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8226                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8227
8228         /* Mbuf cluster free block */
8229         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8230                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8231
8232         /* Host coalescing control block */
8233         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8234                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8235         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8236                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8237                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8238         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8239                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8240                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8241         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8242                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8243         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8244                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8245
8246         /* Memory arbiter control block */
8247         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8248                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8249
8250         /* Buffer manager control block */
8251         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8252                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8253         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8254                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8255         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8256                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8257                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8258                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8259
8260         /* Read DMA control block */
8261         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8262                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8263
8264         /* Write DMA control block */
8265         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8266                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8267
8268         /* DMA completion block */
8269         printk("DEBUG: DMAC_MODE[%08x]\n",
8270                tr32(DMAC_MODE));
8271
8272         /* GRC block */
8273         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8274                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8275         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8276                tr32(GRC_LOCAL_CTRL));
8277
8278         /* TG3_BDINFOs */
8279         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8280                tr32(RCVDBDI_JUMBO_BD + 0x0),
8281                tr32(RCVDBDI_JUMBO_BD + 0x4),
8282                tr32(RCVDBDI_JUMBO_BD + 0x8),
8283                tr32(RCVDBDI_JUMBO_BD + 0xc));
8284         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8285                tr32(RCVDBDI_STD_BD + 0x0),
8286                tr32(RCVDBDI_STD_BD + 0x4),
8287                tr32(RCVDBDI_STD_BD + 0x8),
8288                tr32(RCVDBDI_STD_BD + 0xc));
8289         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8290                tr32(RCVDBDI_MINI_BD + 0x0),
8291                tr32(RCVDBDI_MINI_BD + 0x4),
8292                tr32(RCVDBDI_MINI_BD + 0x8),
8293                tr32(RCVDBDI_MINI_BD + 0xc));
8294
8295         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8296         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8297         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8298         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8299         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8300                val32, val32_2, val32_3, val32_4);
8301
8302         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8303         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8304         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8305         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8306         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8307                val32, val32_2, val32_3, val32_4);
8308
8309         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8310         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8311         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8312         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8313         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8314         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8315                val32, val32_2, val32_3, val32_4, val32_5);
8316
8317         /* SW status block */
8318         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8319                tp->hw_status->status,
8320                tp->hw_status->status_tag,
8321                tp->hw_status->rx_jumbo_consumer,
8322                tp->hw_status->rx_consumer,
8323                tp->hw_status->rx_mini_consumer,
8324                tp->hw_status->idx[0].rx_producer,
8325                tp->hw_status->idx[0].tx_consumer);
8326
8327         /* SW statistics block */
8328         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8329                ((u32 *)tp->hw_stats)[0],
8330                ((u32 *)tp->hw_stats)[1],
8331                ((u32 *)tp->hw_stats)[2],
8332                ((u32 *)tp->hw_stats)[3]);
8333
8334         /* Mailboxes */
8335         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8336                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8337                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8338                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8339                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8340
8341         /* NIC side send descriptors. */
8342         for (i = 0; i < 6; i++) {
8343                 unsigned long txd;
8344
8345                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8346                         + (i * sizeof(struct tg3_tx_buffer_desc));
8347                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8348                        i,
8349                        readl(txd + 0x0), readl(txd + 0x4),
8350                        readl(txd + 0x8), readl(txd + 0xc));
8351         }
8352
8353         /* NIC side RX descriptors. */
8354         for (i = 0; i < 6; i++) {
8355                 unsigned long rxd;
8356
8357                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8358                         + (i * sizeof(struct tg3_rx_buffer_desc));
8359                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8360                        i,
8361                        readl(rxd + 0x0), readl(rxd + 0x4),
8362                        readl(rxd + 0x8), readl(rxd + 0xc));
8363                 rxd += (4 * sizeof(u32));
8364                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8365                        i,
8366                        readl(rxd + 0x0), readl(rxd + 0x4),
8367                        readl(rxd + 0x8), readl(rxd + 0xc));
8368         }
8369
8370         for (i = 0; i < 6; i++) {
8371                 unsigned long rxd;
8372
8373                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8374                         + (i * sizeof(struct tg3_rx_buffer_desc));
8375                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8376                        i,
8377                        readl(rxd + 0x0), readl(rxd + 0x4),
8378                        readl(rxd + 0x8), readl(rxd + 0xc));
8379                 rxd += (4 * sizeof(u32));
8380                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8381                        i,
8382                        readl(rxd + 0x0), readl(rxd + 0x4),
8383                        readl(rxd + 0x8), readl(rxd + 0xc));
8384         }
8385 }
8386 #endif
8387
8388 static struct net_device_stats *tg3_get_stats(struct net_device *);
8389 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8390
8391 static int tg3_close(struct net_device *dev)
8392 {
8393         struct tg3 *tp = netdev_priv(dev);
8394
8395         napi_disable(&tp->napi);
8396         cancel_work_sync(&tp->reset_task);
8397
8398         netif_stop_queue(dev);
8399
8400         del_timer_sync(&tp->timer);
8401
8402         tg3_full_lock(tp, 1);
8403 #if 0
8404         tg3_dump_state(tp);
8405 #endif
8406
8407         tg3_disable_ints(tp);
8408
8409         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8410         tg3_free_rings(tp);
8411         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8412
8413         tg3_full_unlock(tp);
8414
8415         free_irq(tp->pdev->irq, dev);
8416         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8417                 pci_disable_msi(tp->pdev);
8418                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8419         }
8420
8421         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8422                sizeof(tp->net_stats_prev));
8423         memcpy(&tp->estats_prev, tg3_get_estats(tp),
8424                sizeof(tp->estats_prev));
8425
8426         tg3_free_consistent(tp);
8427
8428         tg3_set_power_state(tp, PCI_D3hot);
8429
8430         netif_carrier_off(tp->dev);
8431
8432         return 0;
8433 }
8434
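/* Read a 64-bit hardware statistic; on 32-bit hosts only the low word
 * fits in an unsigned long, so the high word is dropped there.
 */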
8435 static inline unsigned long get_stat64(tg3_stat64_t *val)
8436 {
8437         unsigned long ret;
8438
8439 #if (BITS_PER_LONG == 32)
8440         ret = val->low;
8441 #else
8442         ret = ((u64)val->high << 32) | ((u64)val->low);
8443 #endif
8444         return ret;
8445 }
8446
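/* On 5700/5701 copper devices the PHY keeps the CRC error count; it is
 * read through MII_TG3_TEST1 / PHY register 0x14 and accumulated in
 * tp->phy_crc_errors.  All other devices report it through the MAC's
 * rx_fcs_errors statistic.
 */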
8447 static unsigned long calc_crc_errors(struct tg3 *tp)
8448 {
8449         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8450
8451         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8452             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8453              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8454                 u32 val;
8455
8456                 spin_lock_bh(&tp->lock);
8457                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8458                         tg3_writephy(tp, MII_TG3_TEST1,
8459                                      val | MII_TG3_TEST1_CRC_EN);
8460                         tg3_readphy(tp, 0x14, &val);
8461                 } else
8462                         val = 0;
8463                 spin_unlock_bh(&tp->lock);
8464
8465                 tp->phy_crc_errors += val;
8466
8467                 return tp->phy_crc_errors;
8468         }
8469
8470         return get_stat64(&hw_stats->rx_fcs_errors);
8471 }
8472
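/* Each ethtool statistic is the value snapshotted at the last close
 * (tp->estats_prev) plus the live 64-bit counter from the hardware
 * statistics block.
 */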
8473 #define ESTAT_ADD(member) \
8474         estats->member =        old_estats->member + \
8475                                 get_stat64(&hw_stats->member)
8476
8477 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8478 {
8479         struct tg3_ethtool_stats *estats = &tp->estats;
8480         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8481         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8482
8483         if (!hw_stats)
8484                 return old_estats;
8485
8486         ESTAT_ADD(rx_octets);
8487         ESTAT_ADD(rx_fragments);
8488         ESTAT_ADD(rx_ucast_packets);
8489         ESTAT_ADD(rx_mcast_packets);
8490         ESTAT_ADD(rx_bcast_packets);
8491         ESTAT_ADD(rx_fcs_errors);
8492         ESTAT_ADD(rx_align_errors);
8493         ESTAT_ADD(rx_xon_pause_rcvd);
8494         ESTAT_ADD(rx_xoff_pause_rcvd);
8495         ESTAT_ADD(rx_mac_ctrl_rcvd);
8496         ESTAT_ADD(rx_xoff_entered);
8497         ESTAT_ADD(rx_frame_too_long_errors);
8498         ESTAT_ADD(rx_jabbers);
8499         ESTAT_ADD(rx_undersize_packets);
8500         ESTAT_ADD(rx_in_length_errors);
8501         ESTAT_ADD(rx_out_length_errors);
8502         ESTAT_ADD(rx_64_or_less_octet_packets);
8503         ESTAT_ADD(rx_65_to_127_octet_packets);
8504         ESTAT_ADD(rx_128_to_255_octet_packets);
8505         ESTAT_ADD(rx_256_to_511_octet_packets);
8506         ESTAT_ADD(rx_512_to_1023_octet_packets);
8507         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8508         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8509         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8510         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8511         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8512
8513         ESTAT_ADD(tx_octets);
8514         ESTAT_ADD(tx_collisions);
8515         ESTAT_ADD(tx_xon_sent);
8516         ESTAT_ADD(tx_xoff_sent);
8517         ESTAT_ADD(tx_flow_control);
8518         ESTAT_ADD(tx_mac_errors);
8519         ESTAT_ADD(tx_single_collisions);
8520         ESTAT_ADD(tx_mult_collisions);
8521         ESTAT_ADD(tx_deferred);
8522         ESTAT_ADD(tx_excessive_collisions);
8523         ESTAT_ADD(tx_late_collisions);
8524         ESTAT_ADD(tx_collide_2times);
8525         ESTAT_ADD(tx_collide_3times);
8526         ESTAT_ADD(tx_collide_4times);
8527         ESTAT_ADD(tx_collide_5times);
8528         ESTAT_ADD(tx_collide_6times);
8529         ESTAT_ADD(tx_collide_7times);
8530         ESTAT_ADD(tx_collide_8times);
8531         ESTAT_ADD(tx_collide_9times);
8532         ESTAT_ADD(tx_collide_10times);
8533         ESTAT_ADD(tx_collide_11times);
8534         ESTAT_ADD(tx_collide_12times);
8535         ESTAT_ADD(tx_collide_13times);
8536         ESTAT_ADD(tx_collide_14times);
8537         ESTAT_ADD(tx_collide_15times);
8538         ESTAT_ADD(tx_ucast_packets);
8539         ESTAT_ADD(tx_mcast_packets);
8540         ESTAT_ADD(tx_bcast_packets);
8541         ESTAT_ADD(tx_carrier_sense_errors);
8542         ESTAT_ADD(tx_discards);
8543         ESTAT_ADD(tx_errors);
8544
8545         ESTAT_ADD(dma_writeq_full);
8546         ESTAT_ADD(dma_write_prioq_full);
8547         ESTAT_ADD(rxbds_empty);
8548         ESTAT_ADD(rx_discards);
8549         ESTAT_ADD(rx_errors);
8550         ESTAT_ADD(rx_threshold_hit);
8551
8552         ESTAT_ADD(dma_readq_full);
8553         ESTAT_ADD(dma_read_prioq_full);
8554         ESTAT_ADD(tx_comp_queue_full);
8555
8556         ESTAT_ADD(ring_set_send_prod_index);
8557         ESTAT_ADD(ring_status_update);
8558         ESTAT_ADD(nic_irqs);
8559         ESTAT_ADD(nic_avoided_irqs);
8560         ESTAT_ADD(nic_tx_threshold_hit);
8561
8562         return estats;
8563 }
8564
8565 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8566 {
8567         struct tg3 *tp = netdev_priv(dev);
8568         struct net_device_stats *stats = &tp->net_stats;
8569         struct net_device_stats *old_stats = &tp->net_stats_prev;
8570         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8571
8572         if (!hw_stats)
8573                 return old_stats;
8574
8575         stats->rx_packets = old_stats->rx_packets +
8576                 get_stat64(&hw_stats->rx_ucast_packets) +
8577                 get_stat64(&hw_stats->rx_mcast_packets) +
8578                 get_stat64(&hw_stats->rx_bcast_packets);
8579
8580         stats->tx_packets = old_stats->tx_packets +
8581                 get_stat64(&hw_stats->tx_ucast_packets) +
8582                 get_stat64(&hw_stats->tx_mcast_packets) +
8583                 get_stat64(&hw_stats->tx_bcast_packets);
8584
8585         stats->rx_bytes = old_stats->rx_bytes +
8586                 get_stat64(&hw_stats->rx_octets);
8587         stats->tx_bytes = old_stats->tx_bytes +
8588                 get_stat64(&hw_stats->tx_octets);
8589
8590         stats->rx_errors = old_stats->rx_errors +
8591                 get_stat64(&hw_stats->rx_errors);
8592         stats->tx_errors = old_stats->tx_errors +
8593                 get_stat64(&hw_stats->tx_errors) +
8594                 get_stat64(&hw_stats->tx_mac_errors) +
8595                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8596                 get_stat64(&hw_stats->tx_discards);
8597
8598         stats->multicast = old_stats->multicast +
8599                 get_stat64(&hw_stats->rx_mcast_packets);
8600         stats->collisions = old_stats->collisions +
8601                 get_stat64(&hw_stats->tx_collisions);
8602
8603         stats->rx_length_errors = old_stats->rx_length_errors +
8604                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8605                 get_stat64(&hw_stats->rx_undersize_packets);
8606
8607         stats->rx_over_errors = old_stats->rx_over_errors +
8608                 get_stat64(&hw_stats->rxbds_empty);
8609         stats->rx_frame_errors = old_stats->rx_frame_errors +
8610                 get_stat64(&hw_stats->rx_align_errors);
8611         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8612                 get_stat64(&hw_stats->tx_discards);
8613         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8614                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8615
8616         stats->rx_crc_errors = old_stats->rx_crc_errors +
8617                 calc_crc_errors(tp);
8618
8619         stats->rx_missed_errors = old_stats->rx_missed_errors +
8620                 get_stat64(&hw_stats->rx_discards);
8621
8622         return stats;
8623 }
8624
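/* Bit-by-bit CRC-32 using the reflected Ethernet polynomial 0xedb88320,
 * used only to compute the multicast hash filter bits below.
 */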
8625 static inline u32 calc_crc(unsigned char *buf, int len)
8626 {
8627         u32 reg;
8628         u32 tmp;
8629         int j, k;
8630
8631         reg = 0xffffffff;
8632
8633         for (j = 0; j < len; j++) {
8634                 reg ^= buf[j];
8635
8636                 for (k = 0; k < 8; k++) {
8637                         tmp = reg & 0x01;
8638
8639                         reg >>= 1;
8640
8641                         if (tmp) {
8642                                 reg ^= 0xedb88320;
8643                         }
8644                 }
8645         }
8646
8647         return ~reg;
8648 }
8649
8650 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8651 {
8652         /* accept or reject all multicast frames */
8653         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8654         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8655         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8656         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8657 }
8658
8659 static void __tg3_set_rx_mode(struct net_device *dev)
8660 {
8661         struct tg3 *tp = netdev_priv(dev);
8662         u32 rx_mode;
8663
8664         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8665                                   RX_MODE_KEEP_VLAN_TAG);
8666
8667         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8668          * flag clear.
8669          */
8670 #if TG3_VLAN_TAG_USED
8671         if (!tp->vlgrp &&
8672             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8673                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8674 #else
8675         /* By definition, VLAN is always disabled in this
8676          * case.
8677          */
8678         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8679                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8680 #endif
8681
8682         if (dev->flags & IFF_PROMISC) {
8683                 /* Promiscuous mode. */
8684                 rx_mode |= RX_MODE_PROMISC;
8685         } else if (dev->flags & IFF_ALLMULTI) {
8686                 /* Accept all multicast. */
8687                 tg3_set_multi(tp, 1);
8688         } else if (dev->mc_count < 1) {
8689                 /* Reject all multicast. */
8690                 tg3_set_multi(tp, 0);
8691         } else {
8692                 /* Accept one or more multicast(s). */
8693                 struct dev_mc_list *mclist;
8694                 unsigned int i;
8695                 u32 mc_filter[4] = { 0, };
8696                 u32 regidx;
8697                 u32 bit;
8698                 u32 crc;
8699
8700                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8701                      i++, mclist = mclist->next) {
8702
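                        /* The low 7 bits of the complemented CRC select one
                         * of 128 hash bits: bits 6:5 choose the hash register
                         * and bits 4:0 the bit within it.
                         */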
8703                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8704                         bit = ~crc & 0x7f;
8705                         regidx = (bit & 0x60) >> 5;
8706                         bit &= 0x1f;
8707                         mc_filter[regidx] |= (1 << bit);
8708                 }
8709
8710                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8711                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8712                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8713                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8714         }
8715
8716         if (rx_mode != tp->rx_mode) {
8717                 tp->rx_mode = rx_mode;
8718                 tw32_f(MAC_RX_MODE, rx_mode);
8719                 udelay(10);
8720         }
8721 }
8722
8723 static void tg3_set_rx_mode(struct net_device *dev)
8724 {
8725         struct tg3 *tp = netdev_priv(dev);
8726
8727         if (!netif_running(dev))
8728                 return;
8729
8730         tg3_full_lock(tp, 0);
8731         __tg3_set_rx_mode(dev);
8732         tg3_full_unlock(tp);
8733 }
8734
8735 #define TG3_REGDUMP_LEN         (32 * 1024)
8736
8737 static int tg3_get_regs_len(struct net_device *dev)
8738 {
8739         return TG3_REGDUMP_LEN;
8740 }
8741
8742 static void tg3_get_regs(struct net_device *dev,
8743                 struct ethtool_regs *regs, void *_p)
8744 {
8745         u32 *p = _p;
8746         struct tg3 *tp = netdev_priv(dev);
8747         u8 *orig_p = _p;
8748         int i;
8749
8750         regs->version = 0;
8751
8752         memset(p, 0, TG3_REGDUMP_LEN);
8753
8754         if (tp->link_config.phy_is_low_power)
8755                 return;
8756
8757         tg3_full_lock(tp, 0);
8758
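/* Helper macros for the register dump: each block of registers is copied
 * into the output buffer at the offset matching its register address, so
 * the dump layout mirrors the chip's register map.
 */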
8759 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8760 #define GET_REG32_LOOP(base,len)                \
8761 do {    p = (u32 *)(orig_p + (base));           \
8762         for (i = 0; i < len; i += 4)            \
8763                 __GET_REG32((base) + i);        \
8764 } while (0)
8765 #define GET_REG32_1(reg)                        \
8766 do {    p = (u32 *)(orig_p + (reg));            \
8767         __GET_REG32((reg));                     \
8768 } while (0)
8769
8770         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8771         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8772         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8773         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8774         GET_REG32_1(SNDDATAC_MODE);
8775         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8776         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8777         GET_REG32_1(SNDBDC_MODE);
8778         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8779         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8780         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8781         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8782         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8783         GET_REG32_1(RCVDCC_MODE);
8784         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8785         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8786         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8787         GET_REG32_1(MBFREE_MODE);
8788         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8789         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8790         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8791         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8792         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8793         GET_REG32_1(RX_CPU_MODE);
8794         GET_REG32_1(RX_CPU_STATE);
8795         GET_REG32_1(RX_CPU_PGMCTR);
8796         GET_REG32_1(RX_CPU_HWBKPT);
8797         GET_REG32_1(TX_CPU_MODE);
8798         GET_REG32_1(TX_CPU_STATE);
8799         GET_REG32_1(TX_CPU_PGMCTR);
8800         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8801         GET_REG32_LOOP(FTQ_RESET, 0x120);
8802         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8803         GET_REG32_1(DMAC_MODE);
8804         GET_REG32_LOOP(GRC_MODE, 0x4c);
8805         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8806                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8807
8808 #undef __GET_REG32
8809 #undef GET_REG32_LOOP
8810 #undef GET_REG32_1
8811
8812         tg3_full_unlock(tp);
8813 }
8814
8815 static int tg3_get_eeprom_len(struct net_device *dev)
8816 {
8817         struct tg3 *tp = netdev_priv(dev);
8818
8819         return tp->nvram_size;
8820 }
8821
8822 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8823 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8824 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8825
8826 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8827 {
8828         struct tg3 *tp = netdev_priv(dev);
8829         int ret;
8830         u8  *pd;
8831         u32 i, offset, len, b_offset, b_count;
8832         __le32 val;
8833
8834         if (tp->link_config.phy_is_low_power)
8835                 return -EAGAIN;
8836
8837         offset = eeprom->offset;
8838         len = eeprom->len;
8839         eeprom->len = 0;
8840
8841         eeprom->magic = TG3_EEPROM_MAGIC;
8842
8843         if (offset & 3) {
8844                 /* adjustments to start on required 4 byte boundary */
8845                 b_offset = offset & 3;
8846                 b_count = 4 - b_offset;
8847                 if (b_count > len) {
8848                         /* i.e. offset=1 len=2 */
8849                         b_count = len;
8850                 }
8851                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8852                 if (ret)
8853                         return ret;
8854                 memcpy(data, ((char*)&val) + b_offset, b_count);
8855                 len -= b_count;
8856                 offset += b_count;
8857                 eeprom->len += b_count;
8858         }
8859
8860         /* read bytes up to the last 4 byte boundary */
8861         pd = &data[eeprom->len];
8862         for (i = 0; i < (len - (len & 3)); i += 4) {
8863                 ret = tg3_nvram_read_le(tp, offset + i, &val);
8864                 if (ret) {
8865                         eeprom->len += i;
8866                         return ret;
8867                 }
8868                 memcpy(pd + i, &val, 4);
8869         }
8870         eeprom->len += i;
8871
8872         if (len & 3) {
8873                 /* read last bytes not ending on 4 byte boundary */
8874                 pd = &data[eeprom->len];
8875                 b_count = len & 3;
8876                 b_offset = offset + len - b_count;
8877                 ret = tg3_nvram_read_le(tp, b_offset, &val);
8878                 if (ret)
8879                         return ret;
8880                 memcpy(pd, &val, b_count);
8881                 eeprom->len += b_count;
8882         }
8883         return 0;
8884 }
8885
8886 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8887
8888 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8889 {
8890         struct tg3 *tp = netdev_priv(dev);
8891         int ret;
8892         u32 offset, len, b_offset, odd_len;
8893         u8 *buf;
8894         __le32 start, end;
8895
8896         if (tp->link_config.phy_is_low_power)
8897                 return -EAGAIN;
8898
8899         if (eeprom->magic != TG3_EEPROM_MAGIC)
8900                 return -EINVAL;
8901
8902         offset = eeprom->offset;
8903         len = eeprom->len;
8904
8905         if ((b_offset = (offset & 3))) {
8906                 /* adjustments to start on required 4 byte boundary */
8907                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8908                 if (ret)
8909                         return ret;
8910                 len += b_offset;
8911                 offset &= ~3;
8912                 if (len < 4)
8913                         len = 4;
8914         }
8915
8916         odd_len = 0;
8917         if (len & 3) {
8918                 /* adjustments to end on required 4 byte boundary */
8919                 odd_len = 1;
8920                 len = (len + 3) & ~3;
8921                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8922                 if (ret)
8923                         return ret;
8924         }
8925
8926         buf = data;
8927         if (b_offset || odd_len) {
8928                 buf = kmalloc(len, GFP_KERNEL);
8929                 if (!buf)
8930                         return -ENOMEM;
8931                 if (b_offset)
8932                         memcpy(buf, &start, 4);
8933                 if (odd_len)
8934                         memcpy(buf+len-4, &end, 4);
8935                 memcpy(buf + b_offset, data, eeprom->len);
8936         }
8937
8938         ret = tg3_nvram_write_block(tp, offset, len, buf);
8939
8940         if (buf != data)
8941                 kfree(buf);
8942
8943         return ret;
8944 }
8945
8946 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8947 {
8948         struct tg3 *tp = netdev_priv(dev);
8949
8950         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8951                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8952                         return -EAGAIN;
8953                 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8954         }
8955
8956         cmd->supported = (SUPPORTED_Autoneg);
8957
8958         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8959                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8960                                    SUPPORTED_1000baseT_Full);
8961
8962         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8963                 cmd->supported |= (SUPPORTED_100baseT_Half |
8964                                   SUPPORTED_100baseT_Full |
8965                                   SUPPORTED_10baseT_Half |
8966                                   SUPPORTED_10baseT_Full |
8967                                   SUPPORTED_TP);
8968                 cmd->port = PORT_TP;
8969         } else {
8970                 cmd->supported |= SUPPORTED_FIBRE;
8971                 cmd->port = PORT_FIBRE;
8972         }
8973
8974         cmd->advertising = tp->link_config.advertising;
8975         if (netif_running(dev)) {
8976                 cmd->speed = tp->link_config.active_speed;
8977                 cmd->duplex = tp->link_config.active_duplex;
8978         }
8979         cmd->phy_address = PHY_ADDR;
8980         cmd->transceiver = 0;
8981         cmd->autoneg = tp->link_config.autoneg;
8982         cmd->maxtxpkt = 0;
8983         cmd->maxrxpkt = 0;
8984         return 0;
8985 }
8986
8987 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8988 {
8989         struct tg3 *tp = netdev_priv(dev);
8990
8991         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8992                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8993                         return -EAGAIN;
8994                 return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8995         }
8996
8997         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8998                 /* These are the only valid advertisement bits allowed.  */
8999                 if (cmd->autoneg == AUTONEG_ENABLE &&
9000                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9001                                           ADVERTISED_1000baseT_Full |
9002                                           ADVERTISED_Autoneg |
9003                                           ADVERTISED_FIBRE)))
9004                         return -EINVAL;
9005                 /* Fiber can only do SPEED_1000.  */
9006                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9007                          (cmd->speed != SPEED_1000))
9008                         return -EINVAL;
9009         /* Copper cannot force SPEED_1000.  */
9010         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9011                    (cmd->speed == SPEED_1000))
9012                 return -EINVAL;
9013         else if ((cmd->speed == SPEED_1000) &&
9014                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9015                 return -EINVAL;
9016
9017         tg3_full_lock(tp, 0);
9018
9019         tp->link_config.autoneg = cmd->autoneg;
9020         if (cmd->autoneg == AUTONEG_ENABLE) {
9021                 tp->link_config.advertising = (cmd->advertising |
9022                                               ADVERTISED_Autoneg);
9023                 tp->link_config.speed = SPEED_INVALID;
9024                 tp->link_config.duplex = DUPLEX_INVALID;
9025         } else {
9026                 tp->link_config.advertising = 0;
9027                 tp->link_config.speed = cmd->speed;
9028                 tp->link_config.duplex = cmd->duplex;
9029         }
9030
9031         tp->link_config.orig_speed = tp->link_config.speed;
9032         tp->link_config.orig_duplex = tp->link_config.duplex;
9033         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9034
9035         if (netif_running(dev))
9036                 tg3_setup_phy(tp, 1);
9037
9038         tg3_full_unlock(tp);
9039
9040         return 0;
9041 }
9042
9043 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9044 {
9045         struct tg3 *tp = netdev_priv(dev);
9046
9047         strcpy(info->driver, DRV_MODULE_NAME);
9048         strcpy(info->version, DRV_MODULE_VERSION);
9049         strcpy(info->fw_version, tp->fw_ver);
9050         strcpy(info->bus_info, pci_name(tp->pdev));
9051 }
9052
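/* Report Wake-on-LAN capabilities.  Only magic-packet wakeup is ever
 * advertised, and only if the NIC is WoL-capable and the PCI device is
 * allowed to wake the system.
 */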
9053 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9054 {
9055         struct tg3 *tp = netdev_priv(dev);
9056
9057         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9058             device_can_wakeup(&tp->pdev->dev))
9059                 wol->supported = WAKE_MAGIC;
9060         else
9061                 wol->supported = 0;
9062         wol->wolopts = 0;
9063         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9064                 wol->wolopts = WAKE_MAGIC;
9065         memset(&wol->sopass, 0, sizeof(wol->sopass));
9066 }
9067
9068 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9069 {
9070         struct tg3 *tp = netdev_priv(dev);
9071         struct device *dp = &tp->pdev->dev;
9072
9073         if (wol->wolopts & ~WAKE_MAGIC)
9074                 return -EINVAL;
9075         if ((wol->wolopts & WAKE_MAGIC) &&
9076             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9077                 return -EINVAL;
9078
9079         spin_lock_bh(&tp->lock);
9080         if (wol->wolopts & WAKE_MAGIC) {
9081                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9082                 device_set_wakeup_enable(dp, true);
9083         } else {
9084                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9085                 device_set_wakeup_enable(dp, false);
9086         }
9087         spin_unlock_bh(&tp->lock);
9088
9089         return 0;
9090 }
9091
9092 static u32 tg3_get_msglevel(struct net_device *dev)
9093 {
9094         struct tg3 *tp = netdev_priv(dev);
9095         return tp->msg_enable;
9096 }
9097
9098 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9099 {
9100         struct tg3 *tp = netdev_priv(dev);
9101         tp->msg_enable = value;
9102 }
9103
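/* Enable or disable TSO.  Chips without TSO support reject the request;
 * HW_TSO_2 parts other than the 5906 additionally gain TSO6, and the
 * 5761, non-AX 5784 and 5785 also gain TSO_ECN.  The generic ethtool
 * helper then updates NETIF_F_TSO itself.
 */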
9104 static int tg3_set_tso(struct net_device *dev, u32 value)
9105 {
9106         struct tg3 *tp = netdev_priv(dev);
9107
9108         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9109                 if (value)
9110                         return -EINVAL;
9111                 return 0;
9112         }
9113         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9114             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9115                 if (value) {
9116                         dev->features |= NETIF_F_TSO6;
9117                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9118                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9119                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9120                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9121                                 dev->features |= NETIF_F_TSO_ECN;
9122                 } else
9123                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9124         }
9125         return ethtool_op_set_tso(dev, value);
9126 }
9127
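/* Restart autonegotiation.  Requires a running interface and a
 * non-serdes PHY.  With phylib attached this is phy_start_aneg();
 * otherwise BMCR_ANRESTART is written directly, but only when autoneg
 * is already enabled or parallel detection is in use.
 */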
9128 static int tg3_nway_reset(struct net_device *dev)
9129 {
9130         struct tg3 *tp = netdev_priv(dev);
9131         int r;
9132
9133         if (!netif_running(dev))
9134                 return -EAGAIN;
9135
9136         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9137                 return -EINVAL;
9138
9139         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9140                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9141                         return -EAGAIN;
9142                 r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
9143         } else {
9144                 u32 bmcr;
9145
9146                 spin_lock_bh(&tp->lock);
9147                 r = -EINVAL;
9148                 tg3_readphy(tp, MII_BMCR, &bmcr);
9149                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9150                     ((bmcr & BMCR_ANENABLE) ||
9151                      (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9152                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9153                                                    BMCR_ANENABLE);
9154                         r = 0;
9155                 }
9156                 spin_unlock_bh(&tp->lock);
9157         }
9158
9159         return r;
9160 }
9161
9162 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9163 {
9164         struct tg3 *tp = netdev_priv(dev);
9165
9166         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9167         ering->rx_mini_max_pending = 0;
9168         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9169                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9170         else
9171                 ering->rx_jumbo_max_pending = 0;
9172
9173         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9174
9175         ering->rx_pending = tp->rx_pending;
9176         ering->rx_mini_pending = 0;
9177         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9178                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9179         else
9180                 ering->rx_jumbo_pending = 0;
9181
9182         ering->tx_pending = tp->tx_pending;
9183 }
9184
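/* Change the rx/tx ring sizes.  Requests are bounded by the fixed
 * hardware ring sizes, and the tx ring must leave room for a maximally
 * fragmented skb (three times that margin on TSO_BUG chips).  A running
 * device is halted and restarted with the new sizes.
 */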
9185 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9186 {
9187         struct tg3 *tp = netdev_priv(dev);
9188         int irq_sync = 0, err = 0;
9189
9190         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9191             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9192             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9193             (ering->tx_pending <= MAX_SKB_FRAGS) ||
9194             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9195              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9196                 return -EINVAL;
9197
9198         if (netif_running(dev)) {
9199                 tg3_phy_stop(tp);
9200                 tg3_netif_stop(tp);
9201                 irq_sync = 1;
9202         }
9203
9204         tg3_full_lock(tp, irq_sync);
9205
9206         tp->rx_pending = ering->rx_pending;
9207
9208         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9209             tp->rx_pending > 63)
9210                 tp->rx_pending = 63;
9211         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9212         tp->tx_pending = ering->tx_pending;
9213
9214         if (netif_running(dev)) {
9215                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9216                 err = tg3_restart_hw(tp, 1);
9217                 if (!err)
9218                         tg3_netif_start(tp);
9219         }
9220
9221         tg3_full_unlock(tp);
9222
9223         if (irq_sync && !err)
9224                 tg3_phy_start(tp);
9225
9226         return err;
9227 }
9228
9229 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9230 {
9231         struct tg3 *tp = netdev_priv(dev);
9232
9233         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9234
9235         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9236                 epause->rx_pause = 1;
9237         else
9238                 epause->rx_pause = 0;
9239
9240         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9241                 epause->tx_pause = 1;
9242         else
9243                 epause->tx_pause = 0;
9244 }
9245
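/* Configure flow control.  On phylib-managed devices an autoneg request
 * becomes updated Pause/Asym_Pause advertisement bits (restarting
 * autonegotiation if they changed), while a forced request programs the
 * flow-control settings directly.  On other devices the pause flags are
 * updated under the full lock and the chip is restarted if it is up.
 */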
9246 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9247 {
9248         struct tg3 *tp = netdev_priv(dev);
9249         int err = 0;
9250
9251         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9252                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9253                         return -EAGAIN;
9254
9255                 if (epause->autoneg) {
9256                         u32 newadv;
9257                         struct phy_device *phydev;
9258
9259                         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
9260
9261                         if (epause->rx_pause) {
9262                                 if (epause->tx_pause)
9263                                         newadv = ADVERTISED_Pause;
9264                                 else
9265                                         newadv = ADVERTISED_Pause |
9266                                                  ADVERTISED_Asym_Pause;
9267                         } else if (epause->tx_pause) {
9268                                 newadv = ADVERTISED_Asym_Pause;
9269                         } else
9270                                 newadv = 0;
9271
9272                         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9273                                 u32 oldadv = phydev->advertising &
9274                                              (ADVERTISED_Pause |
9275                                               ADVERTISED_Asym_Pause);
9276                                 if (oldadv != newadv) {
9277                                         phydev->advertising &=
9278                                                 ~(ADVERTISED_Pause |
9279                                                   ADVERTISED_Asym_Pause);
9280                                         phydev->advertising |= newadv;
9281                                         err = phy_start_aneg(phydev);
9282                                 }
9283                         } else {
9284                                 tp->link_config.advertising &=
9285                                                 ~(ADVERTISED_Pause |
9286                                                   ADVERTISED_Asym_Pause);
9287                                 tp->link_config.advertising |= newadv;
9288                         }
9289                 } else {
9290                         if (epause->rx_pause)
9291                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9292                         else
9293                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9294
9295                         if (epause->tx_pause)
9296                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9297                         else
9298                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9299
9300                         if (netif_running(dev))
9301                                 tg3_setup_flow_control(tp, 0, 0);
9302                 }
9303         } else {
9304                 int irq_sync = 0;
9305
9306                 if (netif_running(dev)) {
9307                         tg3_netif_stop(tp);
9308                         irq_sync = 1;
9309                 }
9310
9311                 tg3_full_lock(tp, irq_sync);
9312
9313                 if (epause->autoneg)
9314                         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9315                 else
9316                         tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9317                 if (epause->rx_pause)
9318                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9319                 else
9320                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9321                 if (epause->tx_pause)
9322                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9323                 else
9324                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9325
9326                 if (netif_running(dev)) {
9327                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9328                         err = tg3_restart_hw(tp, 1);
9329                         if (!err)
9330                                 tg3_netif_start(tp);
9331                 }
9332
9333                 tg3_full_unlock(tp);
9334         }
9335
9336         return err;
9337 }
9338
9339 static u32 tg3_get_rx_csum(struct net_device *dev)
9340 {
9341         struct tg3 *tp = netdev_priv(dev);
9342         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9343 }
9344
9345 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9346 {
9347         struct tg3 *tp = netdev_priv(dev);
9348
9349         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9350                 if (data != 0)
9351                         return -EINVAL;
9352                 return 0;
9353         }
9354
9355         spin_lock_bh(&tp->lock);
9356         if (data)
9357                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9358         else
9359                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9360         spin_unlock_bh(&tp->lock);
9361
9362         return 0;
9363 }
9364
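/* Enable or disable tx checksum offload.  Chips with broken checksums
 * refuse the request; the 5755/5761/5784/5785/5787 parts can also offload
 * IPv6 checksums, so the IPv6-aware ethtool helper is used for them.
 */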
9365 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9366 {
9367         struct tg3 *tp = netdev_priv(dev);
9368
9369         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9370                 if (data != 0)
9371                         return -EINVAL;
9372                 return 0;
9373         }
9374
9375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9376             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9377             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9380                 ethtool_op_set_tx_ipv6_csum(dev, data);
9381         else
9382                 ethtool_op_set_tx_csum(dev, data);
9383
9384         return 0;
9385 }
9386
9387 static int tg3_get_sset_count (struct net_device *dev, int sset)
9388 {
9389         switch (sset) {
9390         case ETH_SS_TEST:
9391                 return TG3_NUM_TEST;
9392         case ETH_SS_STATS:
9393                 return TG3_NUM_STATS;
9394         default:
9395                 return -EOPNOTSUPP;
9396         }
9397 }
9398
9399 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9400 {
9401         switch (stringset) {
9402         case ETH_SS_STATS:
9403                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9404                 break;
9405         case ETH_SS_TEST:
9406                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9407                 break;
9408         default:
9409                 WARN_ON(1);     /* we need a WARN() */
9410                 break;
9411         }
9412 }
9413
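/* Identify the NIC by blinking its LEDs.  All LEDs are toggled every
 * 500 ms for roughly 'data' seconds (or a very long time if 0), after
 * which the original LED configuration is restored.
 */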
9414 static int tg3_phys_id(struct net_device *dev, u32 data)
9415 {
9416         struct tg3 *tp = netdev_priv(dev);
9417         int i;
9418
9419         if (!netif_running(tp->dev))
9420                 return -EAGAIN;
9421
9422         if (data == 0)
9423                 data = UINT_MAX / 2;
9424
9425         for (i = 0; i < (data * 2); i++) {
9426                 if ((i % 2) == 0)
9427                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9428                                            LED_CTRL_1000MBPS_ON |
9429                                            LED_CTRL_100MBPS_ON |
9430                                            LED_CTRL_10MBPS_ON |
9431                                            LED_CTRL_TRAFFIC_OVERRIDE |
9432                                            LED_CTRL_TRAFFIC_BLINK |
9433                                            LED_CTRL_TRAFFIC_LED);
9434
9435                 else
9436                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9437                                            LED_CTRL_TRAFFIC_OVERRIDE);
9438
9439                 if (msleep_interruptible(500))
9440                         break;
9441         }
9442         tw32(MAC_LED_CTRL, tp->led_ctrl);
9443         return 0;
9444 }
9445
9446 static void tg3_get_ethtool_stats (struct net_device *dev,
9447                                    struct ethtool_stats *estats, u64 *tmp_stats)
9448 {
9449         struct tg3 *tp = netdev_priv(dev);
9450         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9451 }
9452
9453 #define NVRAM_TEST_SIZE 0x100
9454 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9455 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9456 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9457 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9458 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9459
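/* NVRAM self test.  The magic word at offset 0 selects how much NVRAM to
 * read and which integrity check applies: a simple byte checksum for
 * selfboot firmware images, a parity check for the hardware selfboot
 * format, or CRCs over the bootstrap and manufacturing blocks for the
 * standard EEPROM layout.
 */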
9460 static int tg3_test_nvram(struct tg3 *tp)
9461 {
9462         u32 csum, magic;
9463         __le32 *buf;
9464         int i, j, k, err = 0, size;
9465
9466         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9467                 return -EIO;
9468
9469         if (magic == TG3_EEPROM_MAGIC)
9470                 size = NVRAM_TEST_SIZE;
9471         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9472                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9473                     TG3_EEPROM_SB_FORMAT_1) {
9474                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9475                         case TG3_EEPROM_SB_REVISION_0:
9476                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9477                                 break;
9478                         case TG3_EEPROM_SB_REVISION_2:
9479                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9480                                 break;
9481                         case TG3_EEPROM_SB_REVISION_3:
9482                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9483                                 break;
9484                         default:
9485                                 return 0;
9486                         }
9487                 } else
9488                         return 0;
9489         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9490                 size = NVRAM_SELFBOOT_HW_SIZE;
9491         else
9492                 return -EIO;
9493
9494         buf = kmalloc(size, GFP_KERNEL);
9495         if (buf == NULL)
9496                 return -ENOMEM;
9497
9498         err = -EIO;
9499         for (i = 0, j = 0; i < size; i += 4, j++) {
9500                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9501                         break;
9502         }
9503         if (i < size)
9504                 goto out;
9505
9506         /* Selfboot format */
9507         magic = swab32(le32_to_cpu(buf[0]));
9508         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9509             TG3_EEPROM_MAGIC_FW) {
9510                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9511
9512                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9513                     TG3_EEPROM_SB_REVISION_2) {
9514                         /* For rev 2, the csum doesn't include the MBA. */
9515                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9516                                 csum8 += buf8[i];
9517                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9518                                 csum8 += buf8[i];
9519                 } else {
9520                         for (i = 0; i < size; i++)
9521                                 csum8 += buf8[i];
9522                 }
9523
9524                 if (csum8 == 0) {
9525                         err = 0;
9526                         goto out;
9527                 }
9528
9529                 err = -EIO;
9530                 goto out;
9531         }
9532
9533         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9534             TG3_EEPROM_MAGIC_HW) {
9535                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9536                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9537                 u8 *buf8 = (u8 *) buf;
9538
9539                 /* Separate the parity bits and the data bytes.  */
9540                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9541                         if ((i == 0) || (i == 8)) {
9542                                 int l;
9543                                 u8 msk;
9544
9545                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9546                                         parity[k++] = buf8[i] & msk;
9547                                 i++;
9548                         }
9549                         else if (i == 16) {
9550                                 int l;
9551                                 u8 msk;
9552
9553                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9554                                         parity[k++] = buf8[i] & msk;
9555                                 i++;
9556
9557                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9558                                         parity[k++] = buf8[i] & msk;
9559                                 i++;
9560                         }
9561                         data[j++] = buf8[i];
9562                 }
9563
9564                 err = -EIO;
9565                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9566                         u8 hw8 = hweight8(data[i]);
9567
9568                         if ((hw8 & 0x1) && parity[i])
9569                                 goto out;
9570                         else if (!(hw8 & 0x1) && !parity[i])
9571                                 goto out;
9572                 }
9573                 err = 0;
9574                 goto out;
9575         }
9576
9577         /* Bootstrap checksum at offset 0x10 */
9578         csum = calc_crc((unsigned char *) buf, 0x10);
9579         if (csum != le32_to_cpu(buf[0x10/4]))
9580                 goto out;
9581
9582         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9583         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9584         if (csum != le32_to_cpu(buf[0xfc/4]))
9585                 goto out;
9586
9587         err = 0;
9588
9589 out:
9590         kfree(buf);
9591         return err;
9592 }
9593
9594 #define TG3_SERDES_TIMEOUT_SEC  2
9595 #define TG3_COPPER_TIMEOUT_SEC  6
9596
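/* Link self test: poll netif_carrier_ok() once a second, for up to two
 * seconds on serdes devices and six on copper, failing if no link comes
 * up in that time.
 */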
9597 static int tg3_test_link(struct tg3 *tp)
9598 {
9599         int i, max;
9600
9601         if (!netif_running(tp->dev))
9602                 return -ENODEV;
9603
9604         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9605                 max = TG3_SERDES_TIMEOUT_SEC;
9606         else
9607                 max = TG3_COPPER_TIMEOUT_SEC;
9608
9609         for (i = 0; i < max; i++) {
9610                 if (netif_carrier_ok(tp->dev))
9611                         return 0;
9612
9613                 if (msleep_interruptible(1000))
9614                         break;
9615         }
9616
9617         return -EIO;
9618 }
9619
9620 /* Only test the commonly used registers */
9621 static int tg3_test_registers(struct tg3 *tp)
9622 {
9623         int i, is_5705, is_5750;
9624         u32 offset, read_mask, write_mask, val, save_val, read_val;
9625         static struct {
9626                 u16 offset;
9627                 u16 flags;
9628 #define TG3_FL_5705     0x1
9629 #define TG3_FL_NOT_5705 0x2
9630 #define TG3_FL_NOT_5788 0x4
9631 #define TG3_FL_NOT_5750 0x8
9632                 u32 read_mask;
9633                 u32 write_mask;
9634         } reg_tbl[] = {
9635                 /* MAC Control Registers */
9636                 { MAC_MODE, TG3_FL_NOT_5705,
9637                         0x00000000, 0x00ef6f8c },
9638                 { MAC_MODE, TG3_FL_5705,
9639                         0x00000000, 0x01ef6b8c },
9640                 { MAC_STATUS, TG3_FL_NOT_5705,
9641                         0x03800107, 0x00000000 },
9642                 { MAC_STATUS, TG3_FL_5705,
9643                         0x03800100, 0x00000000 },
9644                 { MAC_ADDR_0_HIGH, 0x0000,
9645                         0x00000000, 0x0000ffff },
9646                 { MAC_ADDR_0_LOW, 0x0000,
9647                         0x00000000, 0xffffffff },
9648                 { MAC_RX_MTU_SIZE, 0x0000,
9649                         0x00000000, 0x0000ffff },
9650                 { MAC_TX_MODE, 0x0000,
9651                         0x00000000, 0x00000070 },
9652                 { MAC_TX_LENGTHS, 0x0000,
9653                         0x00000000, 0x00003fff },
9654                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9655                         0x00000000, 0x000007fc },
9656                 { MAC_RX_MODE, TG3_FL_5705,
9657                         0x00000000, 0x000007dc },
9658                 { MAC_HASH_REG_0, 0x0000,
9659                         0x00000000, 0xffffffff },
9660                 { MAC_HASH_REG_1, 0x0000,
9661                         0x00000000, 0xffffffff },
9662                 { MAC_HASH_REG_2, 0x0000,
9663                         0x00000000, 0xffffffff },
9664                 { MAC_HASH_REG_3, 0x0000,
9665                         0x00000000, 0xffffffff },
9666
9667                 /* Receive Data and Receive BD Initiator Control Registers. */
9668                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9669                         0x00000000, 0xffffffff },
9670                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9671                         0x00000000, 0xffffffff },
9672                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9673                         0x00000000, 0x00000003 },
9674                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9675                         0x00000000, 0xffffffff },
9676                 { RCVDBDI_STD_BD+0, 0x0000,
9677                         0x00000000, 0xffffffff },
9678                 { RCVDBDI_STD_BD+4, 0x0000,
9679                         0x00000000, 0xffffffff },
9680                 { RCVDBDI_STD_BD+8, 0x0000,
9681                         0x00000000, 0xffff0002 },
9682                 { RCVDBDI_STD_BD+0xc, 0x0000,
9683                         0x00000000, 0xffffffff },
9684
9685                 /* Receive BD Initiator Control Registers. */
9686                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9687                         0x00000000, 0xffffffff },
9688                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9689                         0x00000000, 0x000003ff },
9690                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9691                         0x00000000, 0xffffffff },
9692
9693                 /* Host Coalescing Control Registers. */
9694                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9695                         0x00000000, 0x00000004 },
9696                 { HOSTCC_MODE, TG3_FL_5705,
9697                         0x00000000, 0x000000f6 },
9698                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9699                         0x00000000, 0xffffffff },
9700                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9701                         0x00000000, 0x000003ff },
9702                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9703                         0x00000000, 0xffffffff },
9704                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9705                         0x00000000, 0x000003ff },
9706                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9707                         0x00000000, 0xffffffff },
9708                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9709                         0x00000000, 0x000000ff },
9710                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9711                         0x00000000, 0xffffffff },
9712                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9713                         0x00000000, 0x000000ff },
9714                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9715                         0x00000000, 0xffffffff },
9716                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9717                         0x00000000, 0xffffffff },
9718                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9719                         0x00000000, 0xffffffff },
9720                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9721                         0x00000000, 0x000000ff },
9722                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9723                         0x00000000, 0xffffffff },
9724                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9725                         0x00000000, 0x000000ff },
9726                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9727                         0x00000000, 0xffffffff },
9728                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9729                         0x00000000, 0xffffffff },
9730                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9731                         0x00000000, 0xffffffff },
9732                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9733                         0x00000000, 0xffffffff },
9734                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9735                         0x00000000, 0xffffffff },
9736                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9737                         0xffffffff, 0x00000000 },
9738                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9739                         0xffffffff, 0x00000000 },
9740
9741                 /* Buffer Manager Control Registers. */
9742                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9743                         0x00000000, 0x007fff80 },
9744                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9745                         0x00000000, 0x007fffff },
9746                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9747                         0x00000000, 0x0000003f },
9748                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9749                         0x00000000, 0x000001ff },
9750                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9751                         0x00000000, 0x000001ff },
9752                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9753                         0xffffffff, 0x00000000 },
9754                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9755                         0xffffffff, 0x00000000 },
9756
9757                 /* Mailbox Registers */
9758                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9759                         0x00000000, 0x000001ff },
9760                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9761                         0x00000000, 0x000001ff },
9762                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9763                         0x00000000, 0x000007ff },
9764                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9765                         0x00000000, 0x000001ff },
9766
9767                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9768         };
9769
9770         is_5705 = is_5750 = 0;
9771         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9772                 is_5705 = 1;
9773                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9774                         is_5750 = 1;
9775         }
9776
9777         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9778                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9779                         continue;
9780
9781                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9782                         continue;
9783
9784                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9785                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9786                         continue;
9787
9788                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9789                         continue;
9790
9791                 offset = (u32) reg_tbl[i].offset;
9792                 read_mask = reg_tbl[i].read_mask;
9793                 write_mask = reg_tbl[i].write_mask;
9794
9795                 /* Save the original register content */
9796                 save_val = tr32(offset);
9797
9798                 /* Determine the read-only value. */
9799                 read_val = save_val & read_mask;
9800
9801                 /* Write zero to the register, then make sure the read-only bits
9802                  * are not changed and the read/write bits are all zeros.
9803                  */
9804                 tw32(offset, 0);
9805
9806                 val = tr32(offset);
9807
9808                 /* Test the read-only and read/write bits. */
9809                 if (((val & read_mask) != read_val) || (val & write_mask))
9810                         goto out;
9811
9812                 /* Write ones to all the bits defined by RdMask and WrMask, then
9813                  * make sure the read-only bits are not changed and the
9814                  * read/write bits are all ones.
9815                  */
9816                 tw32(offset, read_mask | write_mask);
9817
9818                 val = tr32(offset);
9819
9820                 /* Test the read-only bits. */
9821                 if ((val & read_mask) != read_val)
9822                         goto out;
9823
9824                 /* Test the read/write bits. */
9825                 if ((val & write_mask) != write_mask)
9826                         goto out;
9827
9828                 tw32(offset, save_val);
9829         }
9830
9831         return 0;
9832
9833 out:
9834         if (netif_msg_hw(tp))
9835                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9836                        offset);
9837         tw32(offset, save_val);
9838         return -EIO;
9839 }
9840
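/* Write each test pattern across a region of on-chip memory and read it
 * back word by word to verify the internal SRAM.
 */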
9841 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9842 {
9843         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9844         int i;
9845         u32 j;
9846
9847         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9848                 for (j = 0; j < len; j += 4) {
9849                         u32 val;
9850
9851                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9852                         tg3_read_mem(tp, offset + j, &val);
9853                         if (val != test_pattern[i])
9854                                 return -EIO;
9855                 }
9856         }
9857         return 0;
9858 }
9859
9860 static int tg3_test_memory(struct tg3 *tp)
9861 {
9862         static struct mem_entry {
9863                 u32 offset;
9864                 u32 len;
9865         } mem_tbl_570x[] = {
9866                 { 0x00000000, 0x00b50},
9867                 { 0x00002000, 0x1c000},
9868                 { 0xffffffff, 0x00000}
9869         }, mem_tbl_5705[] = {
9870                 { 0x00000100, 0x0000c},
9871                 { 0x00000200, 0x00008},
9872                 { 0x00004000, 0x00800},
9873                 { 0x00006000, 0x01000},
9874                 { 0x00008000, 0x02000},
9875                 { 0x00010000, 0x0e000},
9876                 { 0xffffffff, 0x00000}
9877         }, mem_tbl_5755[] = {
9878                 { 0x00000200, 0x00008},
9879                 { 0x00004000, 0x00800},
9880                 { 0x00006000, 0x00800},
9881                 { 0x00008000, 0x02000},
9882                 { 0x00010000, 0x0c000},
9883                 { 0xffffffff, 0x00000}
9884         }, mem_tbl_5906[] = {
9885                 { 0x00000200, 0x00008},
9886                 { 0x00004000, 0x00400},
9887                 { 0x00006000, 0x00400},
9888                 { 0x00008000, 0x01000},
9889                 { 0x00010000, 0x01000},
9890                 { 0xffffffff, 0x00000}
9891         };
9892         struct mem_entry *mem_tbl;
9893         int err = 0;
9894         int i;
9895
9896         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9897                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9898                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9899                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9900                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9901                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9902                         mem_tbl = mem_tbl_5755;
9903                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9904                         mem_tbl = mem_tbl_5906;
9905                 else
9906                         mem_tbl = mem_tbl_5705;
9907         } else
9908                 mem_tbl = mem_tbl_570x;
9909
9910         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9911                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9912                     mem_tbl[i].len)) != 0)
9913                         break;
9914         }
9915
9916         return err;
9917 }
9918
9919 #define TG3_MAC_LOOPBACK        0
9920 #define TG3_PHY_LOOPBACK        1
9921
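/* Run one loopback iteration: program either internal MAC loopback or
 * PHY loopback, transmit a single 1514-byte test frame, then poll the
 * status block until the frame appears on the receive return ring and
 * verify its payload byte for byte.
 */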
9922 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9923 {
9924         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9925         u32 desc_idx;
9926         struct sk_buff *skb, *rx_skb;
9927         u8 *tx_data;
9928         dma_addr_t map;
9929         int num_pkts, tx_len, rx_len, i, err;
9930         struct tg3_rx_buffer_desc *desc;
9931
9932         if (loopback_mode == TG3_MAC_LOOPBACK) {
9933                 /* HW errata - mac loopback fails in some cases on 5780.
9934                  * Normal traffic and PHY loopback are not affected by
9935                  * the errata.
9936                  */
9937                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9938                         return 0;
9939
9940                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9941                            MAC_MODE_PORT_INT_LPBACK;
9942                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9943                         mac_mode |= MAC_MODE_LINK_POLARITY;
9944                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9945                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9946                 else
9947                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9948                 tw32(MAC_MODE, mac_mode);
9949         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9950                 u32 val;
9951
9952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9953                         u32 phytest;
9954
9955                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9956                                 u32 phy;
9957
9958                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9959                                              phytest | MII_TG3_EPHY_SHADOW_EN);
9960                                 if (!tg3_readphy(tp, 0x1b, &phy))
9961                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
9962                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9963                         }
9964                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9965                 } else
9966                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9967
9968                 tg3_phy_toggle_automdix(tp, 0);
9969
9970                 tg3_writephy(tp, MII_BMCR, val);
9971                 udelay(40);
9972
9973                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9974                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9975                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9976                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9977                 } else
9978                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9979
9980                 /* reset to prevent losing 1st rx packet intermittently */
9981                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9982                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9983                         udelay(10);
9984                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9985                 }
9986                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9987                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9988                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9989                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9990                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9991                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9992                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9993                 }
9994                 tw32(MAC_MODE, mac_mode);
9995         }
9996         else
9997                 return -EINVAL;
9998
9999         err = -EIO;
10000
10001         tx_len = 1514;
10002         skb = netdev_alloc_skb(tp->dev, tx_len);
10003         if (!skb)
10004                 return -ENOMEM;
10005
10006         tx_data = skb_put(skb, tx_len);
10007         memcpy(tx_data, tp->dev->dev_addr, 6);
10008         memset(tx_data + 6, 0x0, 8);
10009
10010         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10011
10012         for (i = 14; i < tx_len; i++)
10013                 tx_data[i] = (u8) (i & 0xff);
10014
10015         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10016
10017         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10018              HOSTCC_MODE_NOW);
10019
10020         udelay(10);
10021
10022         rx_start_idx = tp->hw_status->idx[0].rx_producer;
10023
10024         num_pkts = 0;
10025
10026         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10027
10028         tp->tx_prod++;
10029         num_pkts++;
10030
10031         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10032                      tp->tx_prod);
10033         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10034
10035         udelay(10);
10036
10037         /* Poll up to 250 usec for the frame to loop back on 10/100 devices.  */
10038         for (i = 0; i < 25; i++) {
10039                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10040                        HOSTCC_MODE_NOW);
10041
10042                 udelay(10);
10043
10044                 tx_idx = tp->hw_status->idx[0].tx_consumer;
10045                 rx_idx = tp->hw_status->idx[0].rx_producer;
10046                 if ((tx_idx == tp->tx_prod) &&
10047                     (rx_idx == (rx_start_idx + num_pkts)))
10048                         break;
10049         }
10050
10051         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10052         dev_kfree_skb(skb);
10053
10054         if (tx_idx != tp->tx_prod)
10055                 goto out;
10056
10057         if (rx_idx != rx_start_idx + num_pkts)
10058                 goto out;
10059
10060         desc = &tp->rx_rcb[rx_start_idx];
10061         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10062         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10063         if (opaque_key != RXD_OPAQUE_RING_STD)
10064                 goto out;
10065
10066         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10067             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10068                 goto out;
10069
10070         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10071         if (rx_len != tx_len)
10072                 goto out;
10073
10074         rx_skb = tp->rx_std_buffers[desc_idx].skb;
10075
10076         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10077         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10078
10079         for (i = 14; i < tx_len; i++) {
10080                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10081                         goto out;
10082         }
10083         err = 0;
10084
10085         /* tg3_free_rings will unmap and free the rx_skb */
10086 out:
10087         return err;
10088 }
10089
10090 #define TG3_MAC_LOOPBACK_FAILED         1
10091 #define TG3_PHY_LOOPBACK_FAILED         2
10092 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10093                                          TG3_PHY_LOOPBACK_FAILED)
10094
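/* Run the MAC loopback test and, for non-serdes devices not managed by
 * phylib, the PHY loopback test.  On the 5784/5761/5785 the CPMU mutex
 * is acquired and link-based power management is turned off for the
 * duration of the tests.
 */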
10095 static int tg3_test_loopback(struct tg3 *tp)
10096 {
10097         int err = 0;
10098         u32 cpmuctrl = 0;
10099
10100         if (!netif_running(tp->dev))
10101                 return TG3_LOOPBACK_FAILED;
10102
10103         err = tg3_reset_hw(tp, 1);
10104         if (err)
10105                 return TG3_LOOPBACK_FAILED;
10106
10107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10110                 int i;
10111                 u32 status;
10112
10113                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10114
10115                 /* Wait for up to 40 microseconds to acquire lock. */
10116                 for (i = 0; i < 4; i++) {
10117                         status = tr32(TG3_CPMU_MUTEX_GNT);
10118                         if (status == CPMU_MUTEX_GNT_DRIVER)
10119                                 break;
10120                         udelay(10);
10121                 }
10122
10123                 if (status != CPMU_MUTEX_GNT_DRIVER)
10124                         return TG3_LOOPBACK_FAILED;
10125
10126                 /* Turn off link-based power management. */
10127                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10128                 tw32(TG3_CPMU_CTRL,
10129                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10130                                   CPMU_CTRL_LINK_AWARE_MODE));
10131         }
10132
10133         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10134                 err |= TG3_MAC_LOOPBACK_FAILED;
10135
10136         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10137             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10138             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10139                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10140
10141                 /* Release the mutex */
10142                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10143         }
10144
10145         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10146             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10147                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10148                         err |= TG3_PHY_LOOPBACK_FAILED;
10149         }
10150
10151         return err;
10152 }
10153
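/* ethtool ->self_test() handler.  Result slots: data[0] NVRAM, data[1]
 * link, data[2] registers, data[3] memory, data[4] loopback, data[5]
 * interrupt.  The register, memory, loopback and interrupt tests are
 * offline tests, run only when ETH_TEST_FL_OFFLINE is set, with the chip
 * halted and afterwards restarted.
 */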
10154 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10155                           u64 *data)
10156 {
10157         struct tg3 *tp = netdev_priv(dev);
10158
10159         if (tp->link_config.phy_is_low_power)
10160                 tg3_set_power_state(tp, PCI_D0);
10161
10162         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10163
10164         if (tg3_test_nvram(tp) != 0) {
10165                 etest->flags |= ETH_TEST_FL_FAILED;
10166                 data[0] = 1;
10167         }
10168         if (tg3_test_link(tp) != 0) {
10169                 etest->flags |= ETH_TEST_FL_FAILED;
10170                 data[1] = 1;
10171         }
10172         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10173                 int err, err2 = 0, irq_sync = 0;
10174
10175                 if (netif_running(dev)) {
10176                         tg3_phy_stop(tp);
10177                         tg3_netif_stop(tp);
10178                         irq_sync = 1;
10179                 }
10180
10181                 tg3_full_lock(tp, irq_sync);
10182
10183                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10184                 err = tg3_nvram_lock(tp);
10185                 tg3_halt_cpu(tp, RX_CPU_BASE);
10186                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10187                         tg3_halt_cpu(tp, TX_CPU_BASE);
10188                 if (!err)
10189                         tg3_nvram_unlock(tp);
10190
10191                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10192                         tg3_phy_reset(tp);
10193
10194                 if (tg3_test_registers(tp) != 0) {
10195                         etest->flags |= ETH_TEST_FL_FAILED;
10196                         data[2] = 1;
10197                 }
10198                 if (tg3_test_memory(tp) != 0) {
10199                         etest->flags |= ETH_TEST_FL_FAILED;
10200                         data[3] = 1;
10201                 }
10202                 if ((data[4] = tg3_test_loopback(tp)) != 0)
10203                         etest->flags |= ETH_TEST_FL_FAILED;
10204
10205                 tg3_full_unlock(tp);
10206
10207                 if (tg3_test_interrupt(tp) != 0) {
10208                         etest->flags |= ETH_TEST_FL_FAILED;
10209                         data[5] = 1;
10210                 }
10211
10212                 tg3_full_lock(tp, 0);
10213
10214                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10215                 if (netif_running(dev)) {
10216                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10217                         err2 = tg3_restart_hw(tp, 1);
10218                         if (!err2)
10219                                 tg3_netif_start(tp);
10220                 }
10221
10222                 tg3_full_unlock(tp);
10223
10224                 if (irq_sync && !err2)
10225                         tg3_phy_start(tp);
10226         }
10227         if (tp->link_config.phy_is_low_power)
10228                 tg3_set_power_state(tp, PCI_D3hot);
10229
10230 }
10231
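/* MII ioctl handler (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG).  Requests
 * are forwarded to phylib when it manages the PHY; otherwise the PHY
 * register is read or written directly under the lock.  Serdes devices
 * have no MII-accessible PHY, and a PHY in low-power state defers the
 * access with -EAGAIN.
 */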
10232 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10233 {
10234         struct mii_ioctl_data *data = if_mii(ifr);
10235         struct tg3 *tp = netdev_priv(dev);
10236         int err;
10237
10238         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10239                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10240                         return -EAGAIN;
10241                 return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
10242         }
10243
10244         switch (cmd) {
10245         case SIOCGMIIPHY:
10246                 data->phy_id = PHY_ADDR;
10247
10248                 /* fallthru */
10249         case SIOCGMIIREG: {
10250                 u32 mii_regval;
10251
10252                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10253                         break;                  /* We have no PHY */
10254
10255                 if (tp->link_config.phy_is_low_power)
10256                         return -EAGAIN;
10257
10258                 spin_lock_bh(&tp->lock);
10259                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10260                 spin_unlock_bh(&tp->lock);
10261
10262                 data->val_out = mii_regval;
10263
10264                 return err;
10265         }
10266
10267         case SIOCSMIIREG:
10268                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10269                         break;                  /* We have no PHY */
10270
10271                 if (!capable(CAP_NET_ADMIN))
10272                         return -EPERM;
10273
10274                 if (tp->link_config.phy_is_low_power)
10275                         return -EAGAIN;
10276
10277                 spin_lock_bh(&tp->lock);
10278                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10279                 spin_unlock_bh(&tp->lock);
10280
10281                 return err;
10282
10283         default:
10284                 /* do nothing */
10285                 break;
10286         }
10287         return -EOPNOTSUPP;
10288 }
10289
10290 #if TG3_VLAN_TAG_USED
10291 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10292 {
10293         struct tg3 *tp = netdev_priv(dev);
10294
10295         if (netif_running(dev))
10296                 tg3_netif_stop(tp);
10297
10298         tg3_full_lock(tp, 0);
10299
10300         tp->vlgrp = grp;
10301
10302         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10303         __tg3_set_rx_mode(dev);
10304
10305         if (netif_running(dev))
10306                 tg3_netif_start(tp);
10307
10308         tg3_full_unlock(tp);
10309 }
10310 #endif
10311
10312 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10313 {
10314         struct tg3 *tp = netdev_priv(dev);
10315
10316         memcpy(ec, &tp->coal, sizeof(*ec));
10317         return 0;
10318 }
10319
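/* Validate and apply interrupt coalescing parameters.  The per-irq tick
 * and statistics-block limits are non-zero only on chips without the
 * 5705_PLUS flag, and at least one of the usec/max-frames thresholds must
 * stay non-zero in each direction so rx and tx interrupts can still be
 * generated.
 */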
10320 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10321 {
10322         struct tg3 *tp = netdev_priv(dev);
10323         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10324         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10325
10326         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10327                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10328                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10329                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10330                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10331         }
10332
10333         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10334             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10335             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10336             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10337             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10338             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10339             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10340             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10341             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10342             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10343                 return -EINVAL;
10344
10345         /* No rx interrupts will be generated if both are zero */
10346         if ((ec->rx_coalesce_usecs == 0) &&
10347             (ec->rx_max_coalesced_frames == 0))
10348                 return -EINVAL;
10349
10350         /* No tx interrupts will be generated if both are zero */
10351         if ((ec->tx_coalesce_usecs == 0) &&
10352             (ec->tx_max_coalesced_frames == 0))
10353                 return -EINVAL;
10354
10355         /* Only copy relevant parameters, ignore all others. */
10356         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10357         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10358         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10359         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10360         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10361         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10362         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10363         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10364         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10365
10366         if (netif_running(dev)) {
10367                 tg3_full_lock(tp, 0);
10368                 __tg3_set_coalesce(tp, &tp->coal);
10369                 tg3_full_unlock(tp);
10370         }
10371         return 0;
10372 }
10373
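/* ethtool operations for the driver.  Roughly, these back the usual
 * ethtool(8) requests: e.g. -s maps to tg3_set_settings, -A/-G/-C to the
 * pauseparam/ringparam/coalesce handlers above, -t runs tg3_self_test
 * and -p blinks the LEDs via tg3_phys_id.
 */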
10374 static const struct ethtool_ops tg3_ethtool_ops = {
10375         .get_settings           = tg3_get_settings,
10376         .set_settings           = tg3_set_settings,
10377         .get_drvinfo            = tg3_get_drvinfo,
10378         .get_regs_len           = tg3_get_regs_len,
10379         .get_regs               = tg3_get_regs,
10380         .get_wol                = tg3_get_wol,
10381         .set_wol                = tg3_set_wol,
10382         .get_msglevel           = tg3_get_msglevel,
10383         .set_msglevel           = tg3_set_msglevel,
10384         .nway_reset             = tg3_nway_reset,
10385         .get_link               = ethtool_op_get_link,
10386         .get_eeprom_len         = tg3_get_eeprom_len,
10387         .get_eeprom             = tg3_get_eeprom,
10388         .set_eeprom             = tg3_set_eeprom,
10389         .get_ringparam          = tg3_get_ringparam,
10390         .set_ringparam          = tg3_set_ringparam,
10391         .get_pauseparam         = tg3_get_pauseparam,
10392         .set_pauseparam         = tg3_set_pauseparam,
10393         .get_rx_csum            = tg3_get_rx_csum,
10394         .set_rx_csum            = tg3_set_rx_csum,
10395         .set_tx_csum            = tg3_set_tx_csum,
10396         .set_sg                 = ethtool_op_set_sg,
10397         .set_tso                = tg3_set_tso,
10398         .self_test              = tg3_self_test,
10399         .get_strings            = tg3_get_strings,
10400         .phys_id                = tg3_phys_id,
10401         .get_ethtool_stats      = tg3_get_ethtool_stats,
10402         .get_coalesce           = tg3_get_coalesce,
10403         .set_coalesce           = tg3_set_coalesce,
10404         .get_sset_count         = tg3_get_sset_count,
10405 };
10406
10407 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10408 {
10409         u32 cursize, val, magic;
10410
10411         tp->nvram_size = EEPROM_CHIP_SIZE;
10412
10413         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10414                 return;
10415
10416         if ((magic != TG3_EEPROM_MAGIC) &&
10417             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10418             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10419                 return;
10420
10421         /*
10422          * Size the chip by reading offsets at increasing powers of two.
10423          * When we encounter our validation signature, we know the addressing
10424          * has wrapped around, and thus have our chip size.
10425          */
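              /* For example (a hypothetical 1 KB, i.e. 0x400-byte part): the
               * read at offset 0x400 wraps back to offset 0 and returns the
               * magic value again, so the loop below exits with
               * cursize == 0x400.
               */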
10426         cursize = 0x10;
10427
10428         while (cursize < tp->nvram_size) {
10429                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10430                         return;
10431
10432                 if (val == magic)
10433                         break;
10434
10435                 cursize <<= 1;
10436         }
10437
10438         tp->nvram_size = cursize;
10439 }
10440
10441 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10442 {
10443         u32 val;
10444
10445         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10446                 return;
10447
10448         /* Selfboot format */
10449         if (val != TG3_EEPROM_MAGIC) {
10450                 tg3_get_eeprom_size(tp);
10451                 return;
10452         }
10453
10454         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10455                 if (val != 0) {
10456                         tp->nvram_size = (val >> 16) * 1024;
10457                         return;
10458                 }
10459         }
10460         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10461 }
10462
10463 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10464 {
10465         u32 nvcfg1;
10466
10467         nvcfg1 = tr32(NVRAM_CFG1);
10468         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10469                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10470         }
10471         else {
10472                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10473                 tw32(NVRAM_CFG1, nvcfg1);
10474         }
10475
10476         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10477             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10478                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10479                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10480                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10481                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10482                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10483                                 break;
10484                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10485                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10486                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10487                                 break;
10488                         case FLASH_VENDOR_ATMEL_EEPROM:
10489                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10490                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10491                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10492                                 break;
10493                         case FLASH_VENDOR_ST:
10494                                 tp->nvram_jedecnum = JEDEC_ST;
10495                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10496                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10497                                 break;
10498                         case FLASH_VENDOR_SAIFUN:
10499                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10500                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10501                                 break;
10502                         case FLASH_VENDOR_SST_SMALL:
10503                         case FLASH_VENDOR_SST_LARGE:
10504                                 tp->nvram_jedecnum = JEDEC_SST;
10505                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10506                                 break;
10507                 }
10508         }
10509         else {
10510                 tp->nvram_jedecnum = JEDEC_ATMEL;
10511                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10512                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10513         }
10514 }
10515
10516 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10517 {
10518         u32 nvcfg1;
10519
10520         nvcfg1 = tr32(NVRAM_CFG1);
10521
10522         /* NVRAM protection for TPM */
10523         if (nvcfg1 & (1 << 27))
10524                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10525
10526         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10527                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10528                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10529                         tp->nvram_jedecnum = JEDEC_ATMEL;
10530                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10531                         break;
10532                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10533                         tp->nvram_jedecnum = JEDEC_ATMEL;
10534                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10535                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10536                         break;
10537                 case FLASH_5752VENDOR_ST_M45PE10:
10538                 case FLASH_5752VENDOR_ST_M45PE20:
10539                 case FLASH_5752VENDOR_ST_M45PE40:
10540                         tp->nvram_jedecnum = JEDEC_ST;
10541                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10542                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10543                         break;
10544         }
10545
10546         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10547                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10548                         case FLASH_5752PAGE_SIZE_256:
10549                                 tp->nvram_pagesize = 256;
10550                                 break;
10551                         case FLASH_5752PAGE_SIZE_512:
10552                                 tp->nvram_pagesize = 512;
10553                                 break;
10554                         case FLASH_5752PAGE_SIZE_1K:
10555                                 tp->nvram_pagesize = 1024;
10556                                 break;
10557                         case FLASH_5752PAGE_SIZE_2K:
10558                                 tp->nvram_pagesize = 2048;
10559                                 break;
10560                         case FLASH_5752PAGE_SIZE_4K:
10561                                 tp->nvram_pagesize = 4096;
10562                                 break;
10563                         case FLASH_5752PAGE_SIZE_264:
10564                                 tp->nvram_pagesize = 264;
10565                                 break;
10566                 }
10567         }
10568         else {
10569                 /* For eeprom, set pagesize to maximum eeprom size */
10570                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10571
10572                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10573                 tw32(NVRAM_CFG1, nvcfg1);
10574         }
10575 }
10576
10577 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10578 {
10579         u32 nvcfg1, protect = 0;
10580
10581         nvcfg1 = tr32(NVRAM_CFG1);
10582
10583         /* NVRAM protection for TPM */
10584         if (nvcfg1 & (1 << 27)) {
10585                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10586                 protect = 1;
10587         }
10588
10589         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10590         switch (nvcfg1) {
10591                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10592                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10593                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10594                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10595                         tp->nvram_jedecnum = JEDEC_ATMEL;
10596                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10597                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10598                         tp->nvram_pagesize = 264;
10599                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10600                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10601                                 tp->nvram_size = (protect ? 0x3e200 :
10602                                                   TG3_NVRAM_SIZE_512KB);
10603                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10604                                 tp->nvram_size = (protect ? 0x1f200 :
10605                                                   TG3_NVRAM_SIZE_256KB);
10606                         else
10607                                 tp->nvram_size = (protect ? 0x1f200 :
10608                                                   TG3_NVRAM_SIZE_128KB);
10609                         break;
10610                 case FLASH_5752VENDOR_ST_M45PE10:
10611                 case FLASH_5752VENDOR_ST_M45PE20:
10612                 case FLASH_5752VENDOR_ST_M45PE40:
10613                         tp->nvram_jedecnum = JEDEC_ST;
10614                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10615                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10616                         tp->nvram_pagesize = 256;
10617                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10618                                 tp->nvram_size = (protect ?
10619                                                   TG3_NVRAM_SIZE_64KB :
10620                                                   TG3_NVRAM_SIZE_128KB);
10621                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10622                                 tp->nvram_size = (protect ?
10623                                                   TG3_NVRAM_SIZE_64KB :
10624                                                   TG3_NVRAM_SIZE_256KB);
10625                         else
10626                                 tp->nvram_size = (protect ?
10627                                                   TG3_NVRAM_SIZE_128KB :
10628                                                   TG3_NVRAM_SIZE_512KB);
10629                         break;
10630         }
10631 }
10632
10633 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10634 {
10635         u32 nvcfg1;
10636
10637         nvcfg1 = tr32(NVRAM_CFG1);
10638
10639         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10640                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10641                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10642                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10643                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10644                         tp->nvram_jedecnum = JEDEC_ATMEL;
10645                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10646                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10647
10648                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10649                         tw32(NVRAM_CFG1, nvcfg1);
10650                         break;
10651                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10652                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10653                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10654                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10655                         tp->nvram_jedecnum = JEDEC_ATMEL;
10656                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10657                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10658                         tp->nvram_pagesize = 264;
10659                         break;
10660                 case FLASH_5752VENDOR_ST_M45PE10:
10661                 case FLASH_5752VENDOR_ST_M45PE20:
10662                 case FLASH_5752VENDOR_ST_M45PE40:
10663                         tp->nvram_jedecnum = JEDEC_ST;
10664                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10665                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10666                         tp->nvram_pagesize = 256;
10667                         break;
10668         }
10669 }
10670
10671 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10672 {
10673         u32 nvcfg1, protect = 0;
10674
10675         nvcfg1 = tr32(NVRAM_CFG1);
10676
10677         /* NVRAM protection for TPM */
10678         if (nvcfg1 & (1 << 27)) {
10679                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10680                 protect = 1;
10681         }
10682
10683         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10684         switch (nvcfg1) {
10685                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10686                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10687                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10688                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10689                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10690                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10691                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10692                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10693                         tp->nvram_jedecnum = JEDEC_ATMEL;
10694                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10695                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10696                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10697                         tp->nvram_pagesize = 256;
10698                         break;
10699                 case FLASH_5761VENDOR_ST_A_M45PE20:
10700                 case FLASH_5761VENDOR_ST_A_M45PE40:
10701                 case FLASH_5761VENDOR_ST_A_M45PE80:
10702                 case FLASH_5761VENDOR_ST_A_M45PE16:
10703                 case FLASH_5761VENDOR_ST_M_M45PE20:
10704                 case FLASH_5761VENDOR_ST_M_M45PE40:
10705                 case FLASH_5761VENDOR_ST_M_M45PE80:
10706                 case FLASH_5761VENDOR_ST_M_M45PE16:
10707                         tp->nvram_jedecnum = JEDEC_ST;
10708                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10709                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10710                         tp->nvram_pagesize = 256;
10711                         break;
10712         }
10713
10714         if (protect) {
10715                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10716         } else {
10717                 switch (nvcfg1) {
10718                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10719                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10720                         case FLASH_5761VENDOR_ST_A_M45PE16:
10721                         case FLASH_5761VENDOR_ST_M_M45PE16:
10722                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10723                                 break;
10724                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10725                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10726                         case FLASH_5761VENDOR_ST_A_M45PE80:
10727                         case FLASH_5761VENDOR_ST_M_M45PE80:
10728                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10729                                 break;
10730                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10731                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10732                         case FLASH_5761VENDOR_ST_A_M45PE40:
10733                         case FLASH_5761VENDOR_ST_M_M45PE40:
10734                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10735                                 break;
10736                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10737                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10738                         case FLASH_5761VENDOR_ST_A_M45PE20:
10739                         case FLASH_5761VENDOR_ST_M_M45PE20:
10740                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10741                                 break;
10742                 }
10743         }
10744 }
10745
10746 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10747 {
10748         tp->nvram_jedecnum = JEDEC_ATMEL;
10749         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10750         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10751 }
10752
10753 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10754 static void __devinit tg3_nvram_init(struct tg3 *tp)
10755 {
10756         tw32_f(GRC_EEPROM_ADDR,
10757              (EEPROM_ADDR_FSM_RESET |
10758               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10759                EEPROM_ADDR_CLKPERD_SHIFT)));
10760
10761         msleep(1);
10762
10763         /* Enable seeprom accesses. */
10764         tw32_f(GRC_LOCAL_CTRL,
10765              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10766         udelay(100);
10767
10768         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10769             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10770                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10771
10772                 if (tg3_nvram_lock(tp)) {
10773                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10774                                "tg3_nvram_init failed.\n", tp->dev->name);
10775                         return;
10776                 }
10777                 tg3_enable_nvram_access(tp);
10778
10779                 tp->nvram_size = 0;
10780
10781                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10782                         tg3_get_5752_nvram_info(tp);
10783                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10784                         tg3_get_5755_nvram_info(tp);
10785                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10786                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10787                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10788                         tg3_get_5787_nvram_info(tp);
10789                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10790                         tg3_get_5761_nvram_info(tp);
10791                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10792                         tg3_get_5906_nvram_info(tp);
10793                 else
10794                         tg3_get_nvram_info(tp);
10795
10796                 if (tp->nvram_size == 0)
10797                         tg3_get_nvram_size(tp);
10798
10799                 tg3_disable_nvram_access(tp);
10800                 tg3_nvram_unlock(tp);
10801
10802         } else {
10803                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10804
10805                 tg3_get_eeprom_size(tp);
10806         }
10807 }
10808
10809 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10810                                         u32 offset, u32 *val)
10811 {
10812         u32 tmp;
10813         int i;
10814
10815         if (offset > EEPROM_ADDR_ADDR_MASK ||
10816             (offset % 4) != 0)
10817                 return -EINVAL;
10818
10819         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10820                                         EEPROM_ADDR_DEVID_MASK |
10821                                         EEPROM_ADDR_READ);
10822         tw32(GRC_EEPROM_ADDR,
10823              tmp |
10824              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10825              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10826               EEPROM_ADDR_ADDR_MASK) |
10827              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10828
10829         for (i = 0; i < 1000; i++) {
10830                 tmp = tr32(GRC_EEPROM_ADDR);
10831
10832                 if (tmp & EEPROM_ADDR_COMPLETE)
10833                         break;
10834                 msleep(1);
10835         }
10836         if (!(tmp & EEPROM_ADDR_COMPLETE))
10837                 return -EBUSY;
10838
10839         *val = tr32(GRC_EEPROM_DATA);
10840         return 0;
10841 }
10842
10843 #define NVRAM_CMD_TIMEOUT 10000
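      /* tg3_nvram_exec_cmd() below polls NVRAM_CMD every 10 usec, so this
       * timeout allows roughly 100 msec for the controller to report
       * NVRAM_CMD_DONE before the command is failed with -EBUSY.
       */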
10844
10845 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10846 {
10847         int i;
10848
10849         tw32(NVRAM_CMD, nvram_cmd);
10850         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10851                 udelay(10);
10852                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10853                         udelay(10);
10854                         break;
10855                 }
10856         }
10857         if (i == NVRAM_CMD_TIMEOUT) {
10858                 return -EBUSY;
10859         }
10860         return 0;
10861 }
10862
10863 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10864 {
10865         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10866             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10867             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10868            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10869             (tp->nvram_jedecnum == JEDEC_ATMEL))
10870
10871                 addr = ((addr / tp->nvram_pagesize) <<
10872                         ATMEL_AT45DB0X1B_PAGE_POS) +
10873                        (addr % tp->nvram_pagesize);
10874
10875         return addr;
10876 }
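      /* Example of the translation above, assuming the usual Atmel AT45DB
       * layout of 264-byte pages with the in-page byte offset held in the low
       * ATMEL_AT45DB0X1B_PAGE_POS bits: linear address 266 (page 1, offset 2)
       * becomes (1 << ATMEL_AT45DB0X1B_PAGE_POS) + 2.
       * tg3_nvram_logical_addr() below performs the inverse mapping.
       */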
10877
10878 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10879 {
10880         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10881             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10882             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10883            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10884             (tp->nvram_jedecnum == JEDEC_ATMEL))
10885
10886                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10887                         tp->nvram_pagesize) +
10888                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10889
10890         return addr;
10891 }
10892
10893 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10894 {
10895         int ret;
10896
10897         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10898                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10899
10900         offset = tg3_nvram_phys_addr(tp, offset);
10901
10902         if (offset > NVRAM_ADDR_MSK)
10903                 return -EINVAL;
10904
10905         ret = tg3_nvram_lock(tp);
10906         if (ret)
10907                 return ret;
10908
10909         tg3_enable_nvram_access(tp);
10910
10911         tw32(NVRAM_ADDR, offset);
10912         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10913                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10914
10915         if (ret == 0)
10916                 *val = swab32(tr32(NVRAM_RDDATA));
10917
10918         tg3_disable_nvram_access(tp);
10919
10920         tg3_nvram_unlock(tp);
10921
10922         return ret;
10923 }
10924
10925 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10926 {
10927         u32 v;
10928         int res = tg3_nvram_read(tp, offset, &v);
10929         if (!res)
10930                 *val = cpu_to_le32(v);
10931         return res;
10932 }
10933
10934 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10935 {
10936         int err;
10937         u32 tmp;
10938
10939         err = tg3_nvram_read(tp, offset, &tmp);
10940         *val = swab32(tmp);
10941         return err;
10942 }
10943
10944 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10945                                     u32 offset, u32 len, u8 *buf)
10946 {
10947         int i, j, rc = 0;
10948         u32 val;
10949
10950         for (i = 0; i < len; i += 4) {
10951                 u32 addr;
10952                 __le32 data;
10953
10954                 addr = offset + i;
10955
10956                 memcpy(&data, buf + i, 4);
10957
10958                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10959
10960                 val = tr32(GRC_EEPROM_ADDR);
10961                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10962
10963                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10964                         EEPROM_ADDR_READ);
10965                 tw32(GRC_EEPROM_ADDR, val |
10966                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10967                         (addr & EEPROM_ADDR_ADDR_MASK) |
10968                         EEPROM_ADDR_START |
10969                         EEPROM_ADDR_WRITE);
10970
10971                 for (j = 0; j < 1000; j++) {
10972                         val = tr32(GRC_EEPROM_ADDR);
10973
10974                         if (val & EEPROM_ADDR_COMPLETE)
10975                                 break;
10976                         msleep(1);
10977                 }
10978                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10979                         rc = -EBUSY;
10980                         break;
10981                 }
10982         }
10983
10984         return rc;
10985 }
10986
10987 /* offset and length are dword aligned */
10988 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10989                 u8 *buf)
10990 {
10991         int ret = 0;
10992         u32 pagesize = tp->nvram_pagesize;
10993         u32 pagemask = pagesize - 1;
10994         u32 nvram_cmd;
10995         u8 *tmp;
10996
10997         tmp = kmalloc(pagesize, GFP_KERNEL);
10998         if (tmp == NULL)
10999                 return -ENOMEM;
11000
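              /* Unbuffered flash can only be programmed a page at a time, so
               * each pass below reads back the page containing 'offset',
               * merges in the new data, erases the page, and then rewrites it
               * word by word.
               */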
11001         while (len) {
11002                 int j;
11003                 u32 phy_addr, page_off, size;
11004
11005                 phy_addr = offset & ~pagemask;
11006
11007                 for (j = 0; j < pagesize; j += 4) {
11008                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11009                                                 (__le32 *) (tmp + j))))
11010                                 break;
11011                 }
11012                 if (ret)
11013                         break;
11014
11015                 page_off = offset & pagemask;
11016                 size = pagesize;
11017                 if (len < size)
11018                         size = len;
11019
11020                 len -= size;
11021
11022                 memcpy(tmp + page_off, buf, size);
11023
11024                 offset = offset + (pagesize - page_off);
11025
11026                 tg3_enable_nvram_access(tp);
11027
11028                 /*
11029                  * Before we can erase the flash page, we need
11030                  * to issue a special "write enable" command.
11031                  */
11032                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11033
11034                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11035                         break;
11036
11037                 /* Erase the target page */
11038                 tw32(NVRAM_ADDR, phy_addr);
11039
11040                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11041                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11042
11043                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11044                         break;
11045
11046                 /* Issue another write enable to start the write. */
11047                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11048
11049                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11050                         break;
11051
11052                 for (j = 0; j < pagesize; j += 4) {
11053                         __be32 data;
11054
11055                         data = *((__be32 *) (tmp + j));
11056                         /* swab32(le32_to_cpu(data)), actually */
11057                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11058
11059                         tw32(NVRAM_ADDR, phy_addr + j);
11060
11061                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11062                                 NVRAM_CMD_WR;
11063
11064                         if (j == 0)
11065                                 nvram_cmd |= NVRAM_CMD_FIRST;
11066                         else if (j == (pagesize - 4))
11067                                 nvram_cmd |= NVRAM_CMD_LAST;
11068
11069                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11070                                 break;
11071                 }
11072                 if (ret)
11073                         break;
11074         }
11075
11076         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11077         tg3_nvram_exec_cmd(tp, nvram_cmd);
11078
11079         kfree(tmp);
11080
11081         return ret;
11082 }
11083
11084 /* offset and length are dword aligned */
11085 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11086                 u8 *buf)
11087 {
11088         int i, ret = 0;
11089
11090         for (i = 0; i < len; i += 4, offset += 4) {
11091                 u32 page_off, phy_addr, nvram_cmd;
11092                 __be32 data;
11093
11094                 memcpy(&data, buf + i, 4);
11095                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11096
11097                 page_off = offset % tp->nvram_pagesize;
11098
11099                 phy_addr = tg3_nvram_phys_addr(tp, offset);
11100
11101                 tw32(NVRAM_ADDR, phy_addr);
11102
11103                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11104
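                      /* Tag the first word of the transfer or of a flash page
                       * with NVRAM_CMD_FIRST, and the last word of a page or
                       * of the transfer with NVRAM_CMD_LAST.
                       */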
11105                 if ((page_off == 0) || (i == 0))
11106                         nvram_cmd |= NVRAM_CMD_FIRST;
11107                 if (page_off == (tp->nvram_pagesize - 4))
11108                         nvram_cmd |= NVRAM_CMD_LAST;
11109
11110                 if (i == (len - 4))
11111                         nvram_cmd |= NVRAM_CMD_LAST;
11112
11113                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11114                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11115                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11116                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11117                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11118                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11119                     (tp->nvram_jedecnum == JEDEC_ST) &&
11120                     (nvram_cmd & NVRAM_CMD_FIRST)) {
11121
11122                         if ((ret = tg3_nvram_exec_cmd(tp,
11123                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11124                                 NVRAM_CMD_DONE)))
11125
11126                                 break;
11127                 }
11128                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11129                         /* We always do complete word writes to eeprom. */
11130                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11131                 }
11132
11133                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11134                         break;
11135         }
11136         return ret;
11137 }
11138
11139 /* offset and length are dword aligned */
11140 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11141 {
11142         int ret;
11143
11144         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
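                      /* Presumably GPIO OUTPUT1 drives the external EEPROM
                       * write-protect pin: drop it for the duration of the
                       * write; tp->grc_local_ctrl is restored at the end of
                       * this function.
                       */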
11145                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11146                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11147                 udelay(40);
11148         }
11149
11150         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11151                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11152         }
11153         else {
11154                 u32 grc_mode;
11155
11156                 ret = tg3_nvram_lock(tp);
11157                 if (ret)
11158                         return ret;
11159
11160                 tg3_enable_nvram_access(tp);
11161                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11162                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11163                         tw32(NVRAM_WRITE1, 0x406);
11164
11165                 grc_mode = tr32(GRC_MODE);
11166                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11167
11168                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11169                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11170
11171                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11172                                 buf);
11173                 }
11174                 else {
11175                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11176                                 buf);
11177                 }
11178
11179                 grc_mode = tr32(GRC_MODE);
11180                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11181
11182                 tg3_disable_nvram_access(tp);
11183                 tg3_nvram_unlock(tp);
11184         }
11185
11186         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11187                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11188                 udelay(40);
11189         }
11190
11191         return ret;
11192 }
11193
11194 struct subsys_tbl_ent {
11195         u16 subsys_vendor, subsys_devid;
11196         u32 phy_id;
11197 };
11198
11199 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11200         /* Broadcom boards. */
11201         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11202         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11203         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11204         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
11205         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11206         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11207         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
11208         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11209         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11210         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11211         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11212
11213         /* 3com boards. */
11214         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11215         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11216         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
11217         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11218         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11219
11220         /* DELL boards. */
11221         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11222         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11223         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11224         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11225
11226         /* Compaq boards. */
11227         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11228         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11229         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
11230         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11231         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11232
11233         /* IBM boards. */
11234         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11235 };
11236
11237 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11238 {
11239         int i;
11240
11241         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11242                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11243                      tp->pdev->subsystem_vendor) &&
11244                     (subsys_id_to_phy_id[i].subsys_devid ==
11245                      tp->pdev->subsystem_device))
11246                         return &subsys_id_to_phy_id[i];
11247         }
11248         return NULL;
11249 }
11250
11251 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11252 {
11253         u32 val;
11254         u16 pmcsr;
11255
11256         /* On some early chips the SRAM cannot be accessed in D3hot state,
11257          * so we need to make sure we're in D0.
11258          */
11259         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11260         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11261         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11262         msleep(1);
11263
11264         /* Make sure register accesses (indirect or otherwise)
11265          * will function correctly.
11266          */
11267         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11268                                tp->misc_host_ctrl);
11269
11270         /* The memory arbiter has to be enabled in order for SRAM accesses
11271          * to succeed.  Normally on powerup the tg3 chip firmware will make
11272          * sure it is enabled, but other entities such as system netboot
11273          * code might disable it.
11274          */
11275         val = tr32(MEMARB_MODE);
11276         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11277
11278         tp->phy_id = PHY_ID_INVALID;
11279         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11280
11281         /* Assume an onboard device and WOL capable by default.  */
11282         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11283
11284         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11285                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11286                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11287                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11288                 }
11289                 val = tr32(VCPU_CFGSHDW);
11290                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11291                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11292                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11293                     (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11294                     device_may_wakeup(&tp->pdev->dev))
11295                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11296                 return;
11297         }
11298
11299         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11300         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11301                 u32 nic_cfg, led_cfg;
11302                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11303                 int eeprom_phy_serdes = 0;
11304
11305                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11306                 tp->nic_sram_data_cfg = nic_cfg;
11307
11308                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11309                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11310                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11311                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11312                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11313                     (ver > 0) && (ver < 0x100))
11314                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11315
11316                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11317                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11318
11319                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11320                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11321                         eeprom_phy_serdes = 1;
11322
11323                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11324                 if (nic_phy_id != 0) {
11325                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11326                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11327
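                              /* Fold the two ID halves into the driver's
                               * internal PHY_ID format; tg3_phy_probe() builds
                               * hw_phy_id from MII_PHYSID1/MII_PHYSID2 with
                               * the same packing.
                               */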
11328                         eeprom_phy_id  = (id1 >> 16) << 10;
11329                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
11330                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
11331                 } else
11332                         eeprom_phy_id = 0;
11333
11334                 tp->phy_id = eeprom_phy_id;
11335                 if (eeprom_phy_serdes) {
11336                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11337                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11338                         else
11339                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11340                 }
11341
11342                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11343                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11344                                     SHASTA_EXT_LED_MODE_MASK);
11345                 else
11346                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11347
11348                 switch (led_cfg) {
11349                 default:
11350                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11351                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11352                         break;
11353
11354                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11355                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11356                         break;
11357
11358                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11359                         tp->led_ctrl = LED_CTRL_MODE_MAC;
11360
11361                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11362                          * read on some older 5700/5701 bootcode.
11363                          */
11364                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11365                             ASIC_REV_5700 ||
11366                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
11367                             ASIC_REV_5701)
11368                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11369
11370                         break;
11371
11372                 case SHASTA_EXT_LED_SHARED:
11373                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
11374                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11375                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11376                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11377                                                  LED_CTRL_MODE_PHY_2);
11378                         break;
11379
11380                 case SHASTA_EXT_LED_MAC:
11381                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11382                         break;
11383
11384                 case SHASTA_EXT_LED_COMBO:
11385                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
11386                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11387                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11388                                                  LED_CTRL_MODE_PHY_2);
11389                         break;
11390
11391                 }
11392
11393                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11394                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11395                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11396                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11397
11398                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11399                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11400
11401                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11402                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11403                         if ((tp->pdev->subsystem_vendor ==
11404                              PCI_VENDOR_ID_ARIMA) &&
11405                             (tp->pdev->subsystem_device == 0x205a ||
11406                              tp->pdev->subsystem_device == 0x2063))
11407                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11408                 } else {
11409                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11410                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11411                 }
11412
11413                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11414                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11415                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11416                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11417                 }
11418                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11419                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11420                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11421                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11422                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11423
11424                 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11425                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11426                     device_may_wakeup(&tp->pdev->dev))
11427                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11428
11429                 if (cfg2 & (1 << 17))
11430                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11431
11432                 /* serdes signal pre-emphasis in register 0x590 set by
11433                  * bootcode if bit 18 is set */
11434                 if (cfg2 & (1 << 18))
11435                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11436
11437                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11438                         u32 cfg3;
11439
11440                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11441                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11442                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11443                 }
11444
11445                 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11446                         tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11447                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11448                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11449                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11450                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11451         }
11452 }
11453
11454 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11455 {
11456         int i;
11457         u32 val;
11458
11459         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11460         tw32(OTP_CTRL, cmd);
11461
11462         /* Wait for up to 1 ms for command to execute. */
11463         for (i = 0; i < 100; i++) {
11464                 val = tr32(OTP_STATUS);
11465                 if (val & OTP_STATUS_CMD_DONE)
11466                         break;
11467                 udelay(10);
11468         }
11469
11470         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11471 }
11472
11473 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11474  * configuration is a 32-bit value that straddles the alignment boundary.
11475  * We do two 32-bit reads and then shift and merge the results.
11476  */
11477 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11478 {
11479         u32 bhalf_otp, thalf_otp;
11480
11481         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11482
11483         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11484                 return 0;
11485
11486         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11487
11488         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11489                 return 0;
11490
11491         thalf_otp = tr32(OTP_READ_DATA);
11492
11493         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11494
11495         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11496                 return 0;
11497
11498         bhalf_otp = tr32(OTP_READ_DATA);
11499
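              /* The config's upper half sits in the low 16 bits of the first
               * word and its lower half in the upper 16 bits of the second
               * word; merge them back into one 32-bit value.
               */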
11500         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11501 }
11502
11503 static int __devinit tg3_phy_probe(struct tg3 *tp)
11504 {
11505         u32 hw_phy_id_1, hw_phy_id_2;
11506         u32 hw_phy_id, hw_phy_id_masked;
11507         int err;
11508
11509         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11510                 return tg3_phy_init(tp);
11511
11512         /* Reading the PHY ID register can conflict with ASF
11513          * firmware access to the PHY hardware.
11514          */
11515         err = 0;
11516         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11517             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11518                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11519         } else {
11520                 /* Now read the physical PHY_ID from the chip and verify
11521                  * that it is sane.  If it doesn't look good, we fall back
11522                  * to either the hard-coded table-based PHY_ID or,
11523                  * failing that, the value found in the eeprom area.
11524                  */
11525                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11526                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11527
11528                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11529                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11530                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
11531
11532                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11533         }
11534
11535         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11536                 tp->phy_id = hw_phy_id;
11537                 if (hw_phy_id_masked == PHY_ID_BCM8002)
11538                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11539                 else
11540                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11541         } else {
11542                 if (tp->phy_id != PHY_ID_INVALID) {
11543                         /* Do nothing, phy ID already set up in
11544                          * tg3_get_eeprom_hw_cfg().
11545                          */
11546                 } else {
11547                         struct subsys_tbl_ent *p;
11548
11549                         /* No eeprom signature?  Try the hardcoded
11550                          * subsys device table.
11551                          */
11552                         p = lookup_by_subsys(tp);
11553                         if (!p)
11554                                 return -ENODEV;
11555
11556                         tp->phy_id = p->phy_id;
11557                         if (!tp->phy_id ||
11558                             tp->phy_id == PHY_ID_BCM8002)
11559                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11560                 }
11561         }
11562
11563         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11564             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11565             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11566                 u32 bmsr, adv_reg, tg3_ctrl, mask;
11567
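                      /* MII_BMSR latches link-down events, so read it twice:
                       * the second read reflects the current link state.
                       */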
11568                 tg3_readphy(tp, MII_BMSR, &bmsr);
11569                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11570                     (bmsr & BMSR_LSTATUS))
11571                         goto skip_phy_reset;
11572
11573                 err = tg3_phy_reset(tp);
11574                 if (err)
11575                         return err;
11576
11577                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11578                            ADVERTISE_100HALF | ADVERTISE_100FULL |
11579                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11580                 tg3_ctrl = 0;
11581                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11582                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11583                                     MII_TG3_CTRL_ADV_1000_FULL);
11584                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11585                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11586                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11587                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
11588                 }
11589
11590                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11591                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11592                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11593                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11594                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11595
11596                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11597                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11598
11599                         tg3_writephy(tp, MII_BMCR,
11600                                      BMCR_ANENABLE | BMCR_ANRESTART);
11601                 }
11602                 tg3_phy_set_wirespeed(tp);
11603
11604                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11605                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11606                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11607         }
11608
11609 skip_phy_reset:
11610         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11611                 err = tg3_init_5401phy_dsp(tp);
11612                 if (err)
11613                         return err;
11614         }
11615
11616         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11617                 err = tg3_init_5401phy_dsp(tp);
11618         }
11619
11620         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11621                 tp->link_config.advertising =
11622                         (ADVERTISED_1000baseT_Half |
11623                          ADVERTISED_1000baseT_Full |
11624                          ADVERTISED_Autoneg |
11625                          ADVERTISED_FIBRE);
11626         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11627                 tp->link_config.advertising &=
11628                         ~(ADVERTISED_1000baseT_Half |
11629                           ADVERTISED_1000baseT_Full);
11630
11631         return err;
11632 }
11633
11634 static void __devinit tg3_read_partno(struct tg3 *tp)
11635 {
11636         unsigned char vpd_data[256];
11637         unsigned int i;
11638         u32 magic;
11639
11640         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11641                 goto out_not_found;
11642
11643         if (magic == TG3_EEPROM_MAGIC) {
11644                 for (i = 0; i < 256; i += 4) {
11645                         u32 tmp;
11646
11647                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11648                                 goto out_not_found;
11649
11650                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11651                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11652                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11653                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11654                 }
11655         } else {
11656                 int vpd_cap;
11657
11658                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11659                 for (i = 0; i < 256; i += 4) {
11660                         u32 tmp, j = 0;
11661                         __le32 v;
11662                         u16 tmp16;
11663
11664                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11665                                               i);
11666                         while (j++ < 100) {
11667                                 pci_read_config_word(tp->pdev, vpd_cap +
11668                                                      PCI_VPD_ADDR, &tmp16);
11669                                 if (tmp16 & 0x8000)
11670                                         break;
11671                                 msleep(1);
11672                         }
11673                         if (!(tmp16 & 0x8000))
11674                                 goto out_not_found;
11675
11676                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11677                                               &tmp);
11678                         v = cpu_to_le32(tmp);
11679                         memcpy(&vpd_data[i], &v, 4);
11680                 }
11681         }
11682
11683         /* Now parse and find the part number. */
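              /* vpd_data[] holds a PCI VPD image: skip the identifier-string
               * (0x82) and VPD-W (0x91) resources, then search the VPD-R
               * (0x90) resource for the "PN" keyword that carries the board
               * part number.
               */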
11684         for (i = 0; i < 254; ) {
11685                 unsigned char val = vpd_data[i];
11686                 unsigned int block_end;
11687
11688                 if (val == 0x82 || val == 0x91) {
11689                         i = (i + 3 +
11690                              (vpd_data[i + 1] +
11691                               (vpd_data[i + 2] << 8)));
11692                         continue;
11693                 }
11694
11695                 if (val != 0x90)
11696                         goto out_not_found;
11697
11698                 block_end = (i + 3 +
11699                              (vpd_data[i + 1] +
11700                               (vpd_data[i + 2] << 8)));
11701                 i += 3;
11702
11703                 if (block_end > 256)
11704                         goto out_not_found;
11705
11706                 while (i < (block_end - 2)) {
11707                         if (vpd_data[i + 0] == 'P' &&
11708                             vpd_data[i + 1] == 'N') {
11709                                 int partno_len = vpd_data[i + 2];
11710
11711                                 i += 3;
11712                                 if (partno_len > 24 || (partno_len + i) > 256)
11713                                         goto out_not_found;
11714
11715                                 memcpy(tp->board_part_number,
11716                                        &vpd_data[i], partno_len);
11717
11718                                 /* Success. */
11719                                 return;
11720                         }
11721                         i += 3 + vpd_data[i + 2];
11722                 }
11723
11724                 /* Part number not found. */
11725                 goto out_not_found;
11726         }
11727
11728 out_not_found:
11729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11730                 strcpy(tp->board_part_number, "BCM95906");
11731         else
11732                 strcpy(tp->board_part_number, "none");
11733 }
11734
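      /* A firmware image header is considered valid when the word at the
       * given offset has 0x0c000000 in its upper bits (mask 0xfc000000)
       * and the following word is zero.
       */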
11735 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11736 {
11737         u32 val;
11738
11739         if (tg3_nvram_read_swab(tp, offset, &val) ||
11740             (val & 0xfc000000) != 0x0c000000 ||
11741             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11742             val != 0)
11743                 return 0;
11744
11745         return 1;
11746 }
11747
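      /* Build tp->fw_ver: read the version string whose location is
       * recorded in the NVRAM header and, when ASF is enabled and the APE
       * is not, append the ASF init firmware version found through the
       * NVRAM directory.
       */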
11748 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11749 {
11750         u32 val, offset, start;
11751         u32 ver_offset;
11752         int i, bcnt;
11753
11754         if (tg3_nvram_read_swab(tp, 0, &val))
11755                 return;
11756
11757         if (val != TG3_EEPROM_MAGIC)
11758                 return;
11759
11760         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11761             tg3_nvram_read_swab(tp, 0x4, &start))
11762                 return;
11763
11764         offset = tg3_nvram_logical_addr(tp, offset);
11765
11766         if (!tg3_fw_img_is_valid(tp, offset) ||
11767             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11768                 return;
11769
11770         offset = offset + ver_offset - start;
11771         for (i = 0; i < 16; i += 4) {
11772                 __le32 v;
11773                 if (tg3_nvram_read_le(tp, offset + i, &v))
11774                         return;
11775
11776                 memcpy(tp->fw_ver + i, &v, 4);
11777         }
11778
11779         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11780              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11781                 return;
11782
11783         for (offset = TG3_NVM_DIR_START;
11784              offset < TG3_NVM_DIR_END;
11785              offset += TG3_NVM_DIRENT_SIZE) {
11786                 if (tg3_nvram_read_swab(tp, offset, &val))
11787                         return;
11788
11789                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11790                         break;
11791         }
11792
11793         if (offset == TG3_NVM_DIR_END)
11794                 return;
11795
11796         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11797                 start = 0x08000000;
11798         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11799                 return;
11800
11801         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11802             !tg3_fw_img_is_valid(tp, offset) ||
11803             tg3_nvram_read_swab(tp, offset + 8, &val))
11804                 return;
11805
11806         offset += val - start;
11807
11808         bcnt = strlen(tp->fw_ver);
11809
11810         tp->fw_ver[bcnt++] = ',';
11811         tp->fw_ver[bcnt++] = ' ';
11812
11813         for (i = 0; i < 4; i++) {
11814                 __le32 v;
11815                 if (tg3_nvram_read_le(tp, offset, &v))
11816                         return;
11817
11818                 offset += sizeof(v);
11819
11820                 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11821                         memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11822                         break;
11823                 }
11824
11825                 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11826                 bcnt += sizeof(v);
11827         }
11828
11829         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11830 }
11831
11832 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11833
11834 static int __devinit tg3_get_invariants(struct tg3 *tp)
11835 {
11836         static struct pci_device_id write_reorder_chipsets[] = {
11837                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11838                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11839                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11840                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11841                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11842                              PCI_DEVICE_ID_VIA_8385_0) },
11843                 { },
11844         };
11845         u32 misc_ctrl_reg;
11846         u32 cacheline_sz_reg;
11847         u32 pci_state_reg, grc_misc_cfg;
11848         u32 val;
11849         u16 pci_cmd;
11850         int err, pcie_cap;
11851
11852         /* Force memory write invalidate off.  If we leave it on,
11853          * then on 5700_BX chips we have to enable a workaround.
11854          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11855          * to match the cacheline size.  The Broadcom driver has this
11856          * workaround but turns MWI off at all times, so it never uses
11857          * it.  This seems to suggest that the workaround is insufficient.
11858          */
11859         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11860         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11861         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11862
11863         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11864          * has the register indirect write enable bit set before
11865          * we try to access any of the MMIO registers.  It is also
11866          * critical that the PCI-X hw workaround situation is decided
11867          * before that point.
11868          */
11869         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11870                               &misc_ctrl_reg);
11871
11872         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11873                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11875                 u32 prod_id_asic_rev;
11876
11877                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11878                                       &prod_id_asic_rev);
11879                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11880         }
11881
11882         /* Wrong chip ID in 5752 A0. This code can be removed later
11883          * as A0 is not in production.
11884          */
11885         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11886                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11887
11888         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11889          * we need to disable memory and use config. cycles
11890          * only to access all registers. The 5702/03 chips
11891          * can mistakenly decode the special cycles from the
11892          * ICH chipsets as memory write cycles, causing corruption
11893          * of register and memory space. Only certain ICH bridges
11894          * will drive special cycles with non-zero data during the
11895          * address phase which can fall within the 5703's address
11896          * range. This is not an ICH bug as the PCI spec allows
11897          * non-zero address during special cycles. However, only
11898          * these ICH bridges are known to drive non-zero addresses
11899          * during special cycles.
11900          *
11901          * Since special cycles do not cross PCI bridges, we only
11902          * enable this workaround if the 5703 is on the secondary
11903          * bus of these ICH bridges.
11904          */
11905         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11906             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11907                 static struct tg3_dev_id {
11908                         u32     vendor;
11909                         u32     device;
11910                         u32     rev;
11911                 } ich_chipsets[] = {
11912                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11913                           PCI_ANY_ID },
11914                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11915                           PCI_ANY_ID },
11916                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11917                           0xa },
11918                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11919                           PCI_ANY_ID },
11920                         { },
11921                 };
11922                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11923                 struct pci_dev *bridge = NULL;
11924
11925                 while (pci_id->vendor != 0) {
11926                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11927                                                 bridge);
11928                         if (!bridge) {
11929                                 pci_id++;
11930                                 continue;
11931                         }
11932                         if (pci_id->rev != PCI_ANY_ID) {
11933                                 if (bridge->revision > pci_id->rev)
11934                                         continue;
11935                         }
11936                         if (bridge->subordinate &&
11937                             (bridge->subordinate->number ==
11938                              tp->pdev->bus->number)) {
11939
11940                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11941                                 pci_dev_put(bridge);
11942                                 break;
11943                         }
11944                 }
11945         }
11946
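              /* Flag TG3_FLG3_5701_DMA_BUG for 5701 parts that sit behind
               * an Intel PXH bridge, i.e. when this device's bus number
               * falls within the bridge's secondary bus range.
               */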
11947         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11948                 static struct tg3_dev_id {
11949                         u32     vendor;
11950                         u32     device;
11951                 } bridge_chipsets[] = {
11952                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11953                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11954                         { },
11955                 };
11956                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11957                 struct pci_dev *bridge = NULL;
11958
11959                 while (pci_id->vendor != 0) {
11960                         bridge = pci_get_device(pci_id->vendor,
11961                                                 pci_id->device,
11962                                                 bridge);
11963                         if (!bridge) {
11964                                 pci_id++;
11965                                 continue;
11966                         }
11967                         if (bridge->subordinate &&
11968                             (bridge->subordinate->number <=
11969                              tp->pdev->bus->number) &&
11970                             (bridge->subordinate->subordinate >=
11971                              tp->pdev->bus->number)) {
11972                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11973                                 pci_dev_put(bridge);
11974                                 break;
11975                         }
11976                 }
11977         }
11978
11979         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11980          * DMA addresses > 40-bit. This bridge may have additional
11981          * 57xx devices behind it in some 4-port NIC designs, for example.
11982          * Any tg3 device found behind the bridge will also need the 40-bit
11983          * DMA workaround.
11984          */
11985         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11986             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11987                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11988                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11989                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11990         }
11991         else {
11992                 struct pci_dev *bridge = NULL;
11993
11994                 do {
11995                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11996                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11997                                                 bridge);
11998                         if (bridge && bridge->subordinate &&
11999                             (bridge->subordinate->number <=
12000                              tp->pdev->bus->number) &&
12001                             (bridge->subordinate->subordinate >=
12002                              tp->pdev->bus->number)) {
12003                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12004                                 pci_dev_put(bridge);
12005                                 break;
12006                         }
12007                 } while (bridge);
12008         }
12009
12010         /* Initialize misc host control in PCI block. */
12011         tp->misc_host_ctrl |= (misc_ctrl_reg &
12012                                MISC_HOST_CTRL_CHIPREV);
12013         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12014                                tp->misc_host_ctrl);
12015
12016         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12017                               &cacheline_sz_reg);
12018
12019         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12020         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12021         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12022         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12023
12024         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12025             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12026                 tp->pdev_peer = tg3_find_peer(tp);
12027
12028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12029             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12031             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12032             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12033             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12036             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12037                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12038
12039         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12040             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12041                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12042
12043         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12044                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12045                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12046                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12047                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12048                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12049                      tp->pdev_peer == tp->pdev))
12050                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12051
12052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12053                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12054                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12055                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12056                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12057                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12058                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12059                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12060                 } else {
12061                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12062                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12063                                 ASIC_REV_5750 &&
12064                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12065                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12066                 }
12067         }
12068
12069         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12070              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12071                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12072
12073         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12074         if (pcie_cap != 0) {
12075                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12076
12077                 pcie_set_readrq(tp->pdev, 4096);
12078
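                      /* On the 5906, hardware TSO is not used when CLKREQ
                       * is enabled on the PCIe link.
                       */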
12079                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12080                         u16 lnkctl;
12081
12082                         pci_read_config_word(tp->pdev,
12083                                              pcie_cap + PCI_EXP_LNKCTL,
12084                                              &lnkctl);
12085                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12086                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12087                 }
12088         }
12089
12090         /* If we have an AMD 762 or VIA K8T800 chipset, write
12091          * reordering to the mailbox registers done by the host
12092          * controller can cause major trouble.  We read back after
12093          * every mailbox register write to force the writes to be
12094          * posted to the chip in order.
12095          */
12096         if (pci_dev_present(write_reorder_chipsets) &&
12097             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12098                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12099
12100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12101             tp->pci_lat_timer < 64) {
12102                 tp->pci_lat_timer = 64;
12103
12104                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12105                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12106                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12107                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12108
12109                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12110                                        cacheline_sz_reg);
12111         }
12112
12113         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12114             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12115                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12116                 if (!tp->pcix_cap) {
12117                         printk(KERN_ERR PFX "Cannot find PCI-X "
12118                                             "capability, aborting.\n");
12119                         return -EIO;
12120                 }
12121         }
12122
12123         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12124                               &pci_state_reg);
12125
12126         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12127                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12128
12129                 /* If this is a 5700 BX chipset, and we are in PCI-X
12130                  * mode, enable the register write workaround.
12131                  *
12132                  * The workaround is to use indirect register accesses
12133                  * for all chip writes except those to the mailbox registers.
12134                  */
12135                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12136                         u32 pm_reg;
12137
12138                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12139
12140                         /* The chip can have its power management PCI config
12141                          * space registers clobbered due to this bug.
12142                          * So explicitly force the chip into D0 here.
12143                          */
12144                         pci_read_config_dword(tp->pdev,
12145                                               tp->pm_cap + PCI_PM_CTRL,
12146                                               &pm_reg);
12147                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12148                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12149                         pci_write_config_dword(tp->pdev,
12150                                                tp->pm_cap + PCI_PM_CTRL,
12151                                                pm_reg);
12152
12153                         /* Also, force SERR#/PERR# in PCI command. */
12154                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12155                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12156                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12157                 }
12158         }
12159
12160         /* 5700 BX chips need to have their TX producer index mailboxes
12161          * written twice to work around a bug.
12162          */
12163         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12164                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12165
12166         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12167                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12168         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12169                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12170
12171         /* Chip-specific fixup from Broadcom driver */
12172         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12173             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12174                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12175                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12176         }
12177
12178         /* Default fast path register access methods */
12179         tp->read32 = tg3_read32;
12180         tp->write32 = tg3_write32;
12181         tp->read32_mbox = tg3_read32;
12182         tp->write32_mbox = tg3_write32;
12183         tp->write32_tx_mbox = tg3_write32;
12184         tp->write32_rx_mbox = tg3_write32;
12185
12186         /* Various workaround register access methods */
12187         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12188                 tp->write32 = tg3_write_indirect_reg32;
12189         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12190                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12191                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12192                 /*
12193                  * Back-to-back register writes can cause problems on these
12194                  * chips; the workaround is to read back all reg writes
12195                  * except those to mailbox regs.
12196                  *
12197                  * See tg3_write_indirect_reg32().
12198                  */
12199                 tp->write32 = tg3_write_flush_reg32;
12200         }
12201
12202
12203         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12204             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12205                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12206                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12207                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12208         }
12209
12210         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12211                 tp->read32 = tg3_read_indirect_reg32;
12212                 tp->write32 = tg3_write_indirect_reg32;
12213                 tp->read32_mbox = tg3_read_indirect_mbox;
12214                 tp->write32_mbox = tg3_write_indirect_mbox;
12215                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12216                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12217
12218                 iounmap(tp->regs);
12219                 tp->regs = NULL;
12220
12221                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12222                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12223                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12224         }
12225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12226                 tp->read32_mbox = tg3_read32_mbox_5906;
12227                 tp->write32_mbox = tg3_write32_mbox_5906;
12228                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12229                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12230         }
12231
12232         if (tp->write32 == tg3_write_indirect_reg32 ||
12233             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12234              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12235               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12236                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12237
12238         /* Get eeprom hw config before calling tg3_set_power_state().
12239          * In particular, the TG3_FLG2_IS_NIC flag must be
12240          * determined before calling tg3_set_power_state() so that
12241          * we know whether or not to switch out of Vaux power.
12242          * When the flag is set, it means that GPIO1 is used for eeprom
12243          * write protect and also implies that it is a LOM where GPIOs
12244          * are not used to switch power.
12245          */
12246         tg3_get_eeprom_hw_cfg(tp);
12247
12248         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12249                 /* Allow reads and writes to the
12250                  * APE register and memory space.
12251                  */
12252                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12253                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12254                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12255                                        pci_state_reg);
12256         }
12257
12258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12261                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12262
12263                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12264                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12265                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12266                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12267                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12268         }
12269
12270         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12271          * GPIO1 driven high will bring 5700's external PHY out of reset.
12272          * It is also used as eeprom write protect on LOMs.
12273          */
12274         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12275         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12276             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12277                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12278                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12279         /* Unused GPIO3 must be driven as output on 5752 because there
12280          * are no pull-up resistors on unused GPIO pins.
12281          */
12282         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12283                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12284
12285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12286                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12287
12288         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12289                 /* Turn off the debug UART. */
12290                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12291                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12292                         /* Keep VMain power. */
12293                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12294                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12295         }
12296
12297         /* Force the chip into D0. */
12298         err = tg3_set_power_state(tp, PCI_D0);
12299         if (err) {
12300                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12301                        pci_name(tp->pdev));
12302                 return err;
12303         }
12304
12305         /* 5700 B0 chips do not support checksumming correctly due
12306          * to hardware bugs.
12307          */
12308         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12309                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12310
12311         /* Derive initial jumbo mode from MTU assigned in
12312          * ether_setup() via the alloc_etherdev() call
12313          */
12314         if (tp->dev->mtu > ETH_DATA_LEN &&
12315             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12316                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12317
12318         /* Determine WakeOnLan speed to use. */
12319         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12320             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12321             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12322             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12323                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12324         } else {
12325                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12326         }
12327
12328         /* A few boards don't want Ethernet@WireSpeed phy feature */
12329         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12330             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12331              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12332              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12333             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12334             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12335                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12336
12337         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12338             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12339                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12340         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12341                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12342
12343         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12344                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12345                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12346                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12347                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12348                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12349                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12350                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12351                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12352                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12353                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12354                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12355                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12356         }
12357
12358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12359             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12360                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12361                 if (tp->phy_otp == 0)
12362                         tp->phy_otp = TG3_OTP_DEFAULT;
12363         }
12364
12365         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12366                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12367         else
12368                 tp->mi_mode = MAC_MI_MODE_BASE;
12369
12370         tp->coalesce_mode = 0;
12371         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12372             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12373                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12374
12375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12376                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12377
12378         err = tg3_mdio_init(tp);
12379         if (err)
12380                 return err;
12381
12382         /* Initialize data/descriptor byte/word swapping. */
12383         val = tr32(GRC_MODE);
12384         val &= GRC_MODE_HOST_STACKUP;
12385         tw32(GRC_MODE, val | tp->grc_mode);
12386
12387         tg3_switch_clocks(tp);
12388
12389         /* Clear this out for sanity. */
12390         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12391
12392         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12393                               &pci_state_reg);
12394         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12395             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12396                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12397
12398                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12399                     chiprevid == CHIPREV_ID_5701_B0 ||
12400                     chiprevid == CHIPREV_ID_5701_B2 ||
12401                     chiprevid == CHIPREV_ID_5701_B5) {
12402                         void __iomem *sram_base;
12403
12404                         /* Write some dummy words into the SRAM status block
12405                          * area and see if it reads back correctly.  If the return
12406                          * value is bad, force-enable the PCI-X workaround.
12407                          */
12408                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12409
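                              /* The write to sram_base + 4 must not disturb
                               * the word at sram_base; if it does, register
                               * writes are being corrupted and the target
                               * workaround is forced on.
                               */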
12410                         writel(0x00000000, sram_base);
12411                         writel(0x00000000, sram_base + 4);
12412                         writel(0xffffffff, sram_base + 4);
12413                         if (readl(sram_base) != 0x00000000)
12414                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12415                 }
12416         }
12417
12418         udelay(50);
12419         tg3_nvram_init(tp);
12420
12421         grc_misc_cfg = tr32(GRC_MISC_CFG);
12422         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12423
12424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12425             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12426              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12427                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12428
12429         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12430             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12431                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12432         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12433                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12434                                       HOSTCC_MODE_CLRTICK_TXBD);
12435
12436                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12437                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12438                                        tp->misc_host_ctrl);
12439         }
12440
12441         /* Preserve the APE MAC_MODE bits */
12442         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12443                 tp->mac_mode = tr32(MAC_MODE) |
12444                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12445         else
12446                 tp->mac_mode = TG3_DEF_MAC_MODE;
12447
12448         /* these are limited to 10/100 only */
12449         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12450              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12451             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12452              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12453              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12454               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12455               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12456             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12457              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12458               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12459               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12460             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12461                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12462
12463         err = tg3_phy_probe(tp);
12464         if (err) {
12465                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12466                        pci_name(tp->pdev), err);
12467                 /* ... but do not return immediately ... */
12468                 tg3_mdio_fini(tp);
12469         }
12470
12471         tg3_read_partno(tp);
12472         tg3_read_fw_ver(tp);
12473
12474         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12475                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12476         } else {
12477                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12478                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12479                 else
12480                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12481         }
12482
12483         /* 5700 {AX,BX} chips have a broken status block link
12484          * change bit implementation, so we must use the
12485          * status register in those cases.
12486          */
12487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12488                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12489         else
12490                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12491
12492         /* The led_ctrl is set during tg3_phy_probe; here we might
12493          * have to force the link status polling mechanism based
12494          * upon subsystem IDs.
12495          */
12496         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12498             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12499                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12500                                   TG3_FLAG_USE_LINKCHG_REG);
12501         }
12502
12503         /* For all SERDES we poll the MAC status register. */
12504         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12505                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12506         else
12507                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12508
12509         /* All chips before 5787 can get confused if TX buffers
12510          * straddle the 4GB address boundary in some cases.
12511          */
12512         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12513             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12514             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12515             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12516             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12518                 tp->dev->hard_start_xmit = tg3_start_xmit;
12519         else
12520                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12521
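              /* Reserve a 2-byte pad in front of received frames so the IP
               * header lands on a 32-bit boundary; the 5701 in PCI-X mode
               * cannot use the pad (presumably due to a DMA erratum) and
               * runs unpadded.
               */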
12522         tp->rx_offset = 2;
12523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12524             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12525                 tp->rx_offset = 0;
12526
12527         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12528
12529         /* Increment the rx prod index on the rx std ring by at most
12530          * 8 for these chips to work around hw errata.
12531          */
12532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12535                 tp->rx_std_max_post = 8;
12536
12537         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12538                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12539                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12540
12541         return err;
12542 }
12543
12544 #ifdef CONFIG_SPARC
12545 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12546 {
12547         struct net_device *dev = tp->dev;
12548         struct pci_dev *pdev = tp->pdev;
12549         struct device_node *dp = pci_device_to_OF_node(pdev);
12550         const unsigned char *addr;
12551         int len;
12552
12553         addr = of_get_property(dp, "local-mac-address", &len);
12554         if (addr && len == 6) {
12555                 memcpy(dev->dev_addr, addr, 6);
12556                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12557                 return 0;
12558         }
12559         return -ENODEV;
12560 }
12561
12562 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12563 {
12564         struct net_device *dev = tp->dev;
12565
12566         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12567         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12568         return 0;
12569 }
12570 #endif
12571
12572 static int __devinit tg3_get_device_address(struct tg3 *tp)
12573 {
12574         struct net_device *dev = tp->dev;
12575         u32 hi, lo, mac_offset;
12576         int addr_ok = 0;
12577
12578 #ifdef CONFIG_SPARC
12579         if (!tg3_get_macaddr_sparc(tp))
12580                 return 0;
12581 #endif
12582
12583         mac_offset = 0x7c;
12584         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12585             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12586                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12587                         mac_offset = 0xcc;
12588                 if (tg3_nvram_lock(tp))
12589                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12590                 else
12591                         tg3_nvram_unlock(tp);
12592         }
12593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12594                 mac_offset = 0x10;
12595
12596         /* First try to get it from MAC address mailbox. */
12597         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
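              /* The upper 16 bits read back as 0x484b (ASCII "HK") when the
               * bootcode has stored a valid address in the mailbox.
               */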
12598         if ((hi >> 16) == 0x484b) {
12599                 dev->dev_addr[0] = (hi >>  8) & 0xff;
12600                 dev->dev_addr[1] = (hi >>  0) & 0xff;
12601
12602                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12603                 dev->dev_addr[2] = (lo >> 24) & 0xff;
12604                 dev->dev_addr[3] = (lo >> 16) & 0xff;
12605                 dev->dev_addr[4] = (lo >>  8) & 0xff;
12606                 dev->dev_addr[5] = (lo >>  0) & 0xff;
12607
12608                 /* Some old bootcode may report a 0 MAC address in SRAM */
12609                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12610         }
12611         if (!addr_ok) {
12612                 /* Next, try NVRAM. */
12613                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12614                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12615                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
12616                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
12617                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
12618                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
12619                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
12620                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
12621                 }
12622                 /* Finally just fetch it out of the MAC control regs. */
12623                 else {
12624                         hi = tr32(MAC_ADDR_0_HIGH);
12625                         lo = tr32(MAC_ADDR_0_LOW);
12626
12627                         dev->dev_addr[5] = lo & 0xff;
12628                         dev->dev_addr[4] = (lo >> 8) & 0xff;
12629                         dev->dev_addr[3] = (lo >> 16) & 0xff;
12630                         dev->dev_addr[2] = (lo >> 24) & 0xff;
12631                         dev->dev_addr[1] = hi & 0xff;
12632                         dev->dev_addr[0] = (hi >> 8) & 0xff;
12633                 }
12634         }
12635
12636         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12637 #ifdef CONFIG_SPARC
12638                 if (!tg3_get_default_macaddr_sparc(tp))
12639                         return 0;
12640 #endif
12641                 return -EINVAL;
12642         }
12643         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12644         return 0;
12645 }
12646
12647 #define BOUNDARY_SINGLE_CACHELINE       1
12648 #define BOUNDARY_MULTI_CACHELINE        2
12649
12650 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12651 {
12652         int cacheline_size;
12653         u8 byte;
12654         int goal;
12655
12656         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12657         if (byte == 0)
12658                 cacheline_size = 1024;
12659         else
12660                 cacheline_size = (int) byte * 4;
12661
12662         /* On 5703 and later chips, the boundary bits have no
12663          * effect.
12664          */
12665         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12666             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12667             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12668                 goto out;
12669
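      /* Pick a boundary goal per architecture: multi-cacheline bursts on
       * PPC64, IA64 and PARISC, single-cacheline on SPARC64 and Alpha,
       * and no restriction (goal == 0) everywhere else.
       */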
12670 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12671         goal = BOUNDARY_MULTI_CACHELINE;
12672 #else
12673 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12674         goal = BOUNDARY_SINGLE_CACHELINE;
12675 #else
12676         goal = 0;
12677 #endif
12678 #endif
12679
12680         if (!goal)
12681                 goto out;
12682
12683         /* PCI controllers on most RISC systems tend to disconnect
12684          * when a device tries to burst across a cache-line boundary.
12685          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12686          *
12687          * Unfortunately, for PCI-E there are only limited
12688          * write-side controls for this, and thus for reads
12689          * we will still get the disconnects.  We'll also waste
12690          * these PCI cycles for both read and write for chips
12691          * other than 5700 and 5701 which do not implement the
12692          * boundary bits.
12693          */
12694         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12695             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12696                 switch (cacheline_size) {
12697                 case 16:
12698                 case 32:
12699                 case 64:
12700                 case 128:
12701                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12702                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12703                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12704                         } else {
12705                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12706                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12707                         }
12708                         break;
12709
12710                 case 256:
12711                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12712                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12713                         break;
12714
12715                 default:
12716                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12717                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12718                         break;
12719                 }
12720         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12721                 switch (cacheline_size) {
12722                 case 16:
12723                 case 32:
12724                 case 64:
12725                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12726                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12727                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12728                                 break;
12729                         }
12730                         /* fallthrough */
12731                 case 128:
12732                 default:
12733                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12734                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12735                         break;
12736                 }
12737         } else {
12738                 switch (cacheline_size) {
12739                 case 16:
12740                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12741                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12742                                         DMA_RWCTRL_WRITE_BNDRY_16);
12743                                 break;
12744                         }
12745                         /* fallthrough */
12746                 case 32:
12747                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12748                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12749                                         DMA_RWCTRL_WRITE_BNDRY_32);
12750                                 break;
12751                         }
12752                         /* fallthrough */
12753                 case 64:
12754                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12755                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12756                                         DMA_RWCTRL_WRITE_BNDRY_64);
12757                                 break;
12758                         }
12759                         /* fallthrough */
12760                 case 128:
12761                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12762                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12763                                         DMA_RWCTRL_WRITE_BNDRY_128);
12764                                 break;
12765                         }
12766                         /* fallthrough */
12767                 case 256:
12768                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12769                                 DMA_RWCTRL_WRITE_BNDRY_256);
12770                         break;
12771                 case 512:
12772                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12773                                 DMA_RWCTRL_WRITE_BNDRY_512);
12774                         break;
12775                 case 1024:
12776                 default:
12777                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12778                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12779                         break;
12780                 }
12781         }
12782
12783 out:
12784         return val;
12785 }
12786
12787 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12788 {
12789         struct tg3_internal_buffer_desc test_desc;
12790         u32 sram_dma_descs;
12791         int i, ret;
12792
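              /* Place one internal buffer descriptor in NIC SRAM through
               * the PCI memory window, enable the read (to_device) or
               * write DMA engine, queue the descriptor, and then poll the
               * matching completion FIFO until the descriptor address
               * comes back or we give up.
               */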
12793         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12794
12795         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12796         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12797         tw32(RDMAC_STATUS, 0);
12798         tw32(WDMAC_STATUS, 0);
12799
12800         tw32(BUFMGR_MODE, 0);
12801         tw32(FTQ_RESET, 0);
12802
12803         test_desc.addr_hi = ((u64) buf_dma) >> 32;
12804         test_desc.addr_lo = buf_dma & 0xffffffff;
12805         test_desc.nic_mbuf = 0x00002100;
12806         test_desc.len = size;
12807
12808         /*
12809          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12810          * the *second* time the tg3 driver was getting loaded after an
12811          * initial scan.
12812          *
12813          * Broadcom tells me:
12814          *   ...the DMA engine is connected to the GRC block and a DMA
12815          *   reset may affect the GRC block in some unpredictable way...
12816          *   The behavior of resets to individual blocks has not been tested.
12817          *
12818          * Broadcom noted the GRC reset will also reset all sub-components.
12819          */
12820         if (to_device) {
12821                 test_desc.cqid_sqid = (13 << 8) | 2;
12822
12823                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12824                 udelay(40);
12825         } else {
12826                 test_desc.cqid_sqid = (16 << 8) | 7;
12827
12828                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12829                 udelay(40);
12830         }
12831         test_desc.flags = 0x00000005;
12832
12833         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12834                 u32 val;
12835
12836                 val = *(((u32 *)&test_desc) + i);
12837                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12838                                        sram_dma_descs + (i * sizeof(u32)));
12839                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12840         }
12841         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12842
12843         if (to_device) {
12844                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12845         } else {
12846                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12847         }
12848
12849         ret = -ENODEV;
12850         for (i = 0; i < 40; i++) {
12851                 u32 val;
12852
12853                 if (to_device)
12854                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12855                 else
12856                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12857                 if ((val & 0xffff) == sram_dma_descs) {
12858                         ret = 0;
12859                         break;
12860                 }
12861
12862                 udelay(100);
12863         }
12864
12865         return ret;
12866 }
12867
12868 #define TEST_BUFFER_SIZE        0x2000
12869
12870 static int __devinit tg3_test_dma(struct tg3 *tp)
12871 {
12872         dma_addr_t buf_dma;
12873         u32 *buf, saved_dma_rwctrl;
12874         int ret;
12875
12876         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12877         if (!buf) {
12878                 ret = -ENOMEM;
12879                 goto out_nofree;
12880         }
12881
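              /* Seed dma_rwctrl with the default PCI read/write DMA command
               * codes; the bus- and chip-specific watermark bits are filled
               * in below.
               */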
12882         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12883                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12884
12885         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12886
12887         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12888                 /* DMA read watermark not used on PCIE */
12889                 tp->dma_rwctrl |= 0x00180000;
12890         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12891                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12892                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12893                         tp->dma_rwctrl |= 0x003f0000;
12894                 else
12895                         tp->dma_rwctrl |= 0x003f000f;
12896         } else {
12897                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12898                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12899                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12900                         u32 read_water = 0x7;
12901
12902                         /* If the 5704 is behind the EPB bridge, we can
12903                          * do the less restrictive ONE_DMA workaround for
12904                          * better performance.
12905                          */
12906                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12907                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12908                                 tp->dma_rwctrl |= 0x8000;
12909                         else if (ccval == 0x6 || ccval == 0x7)
12910                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12911
12912                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12913                                 read_water = 4;
12914                         /* Set bit 23 to enable PCIX hw bug fix */
12915                         tp->dma_rwctrl |=
12916                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12917                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12918                                 (1 << 23);
12919                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12920                         /* 5780 always in PCIX mode */
12921                         tp->dma_rwctrl |= 0x00144000;
12922                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12923                         /* 5714 always in PCIX mode */
12924                         tp->dma_rwctrl |= 0x00148000;
12925                 } else {
12926                         tp->dma_rwctrl |= 0x001b000f;
12927                 }
12928         }
12929
12930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12931             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12932                 tp->dma_rwctrl &= 0xfffffff0;
12933
12934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12936                 /* Remove this if it causes problems for some boards. */
12937                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12938
12939                 /* On 5700/5701 chips, we need to set this bit.
12940                  * Otherwise the chip will issue cacheline transactions
12941                  * to streamable DMA memory with not all the byte
12942                  * enables turned on.  This is an error on several
12943                  * RISC PCI controllers, in particular sparc64.
12944                  *
12945                  * On 5703/5704 chips, this bit has been reassigned
12946                  * a different meaning.  In particular, it is used
12947                  * on those chips to enable a PCI-X workaround.
12948                  */
12949                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12950         }
12951
12952         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12953
12954 #if 0
12955         /* Unneeded, already done by tg3_get_invariants.  */
12956         tg3_switch_clocks(tp);
12957 #endif
12958
12959         ret = 0;
12960         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12961             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12962                 goto out;
12963
12964         /* It is best to perform DMA test with maximum write burst size
12965          * to expose the 5700/5701 write DMA bug.
12966          */
12967         saved_dma_rwctrl = tp->dma_rwctrl;
12968         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12969         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12970
12971         while (1) {
12972                 u32 *p = buf, i;
12973
12974                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12975                         p[i] = i;
12976
12977                 /* Send the buffer to the chip. */
12978                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12979                 if (ret) {
12980                         printk(KERN_ERR "tg3_test_dma(): write of test buffer failed, err %d\n", ret);
12981                         break;
12982                 }
12983
12984 #if 0
12985                 /* validate data reached card RAM correctly. */
12986                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12987                         u32 val;
12988                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12989                         if (le32_to_cpu(val) != p[i]) {
12990                                 printk(KERN_ERR "tg3_test_dma(): card buffer corrupted on write! (%u != %u)\n", le32_to_cpu(val), p[i]);
12991                                 /* ret = -ENODEV here? */
12992                         }
12993                         p[i] = 0;
12994                 }
12995 #endif
12996                 /* Now read it back. */
12997                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12998                 if (ret) {
12999                         printk(KERN_ERR "tg3_test_dma(): read of test buffer failed, err %d\n", ret);
13000
13001                         break;
13002                 }
13003
13004                 /* Verify it. */
13005                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13006                         if (p[i] == i)
13007                                 continue;
13008
13009                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13010                             DMA_RWCTRL_WRITE_BNDRY_16) {
13011                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13012                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13013                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13014                                 break;
13015                         } else {
13016                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13017                                 ret = -ENODEV;
13018                                 goto out;
13019                         }
13020                 }
13021
13022                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13023                         /* Success. */
13024                         ret = 0;
13025                         break;
13026                 }
13027         }
13028         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13029             DMA_RWCTRL_WRITE_BNDRY_16) {
13030                 static struct pci_device_id dma_wait_state_chipsets[] = {
13031                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13032                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13033                         { },
13034                 };
13035
13036                 /* DMA test passed without adjusting DMA boundary,
13037                  * now look for chipsets that are known to expose the
13038                  * DMA bug without failing the test.
13039                  */
13040                 if (pci_dev_present(dma_wait_state_chipsets)) {
13041                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13042                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13043                 } else {
13044                         /* Safe to use the calculated DMA boundary. */
13045                         tp->dma_rwctrl = saved_dma_rwctrl;
13046                 }
13047
13048                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13049         }
13050
13051 out:
13052         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13053 out_nofree:
13054         return ret;
13055 }
13056
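/* Default link configuration: advertise every 10/100/1000 mode and leave
 * speed/duplex selection to autonegotiation.
 */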
13057 static void __devinit tg3_init_link_config(struct tg3 *tp)
13058 {
13059         tp->link_config.advertising =
13060                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13061                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13062                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13063                  ADVERTISED_Autoneg | ADVERTISED_MII);
13064         tp->link_config.speed = SPEED_INVALID;
13065         tp->link_config.duplex = DUPLEX_INVALID;
13066         tp->link_config.autoneg = AUTONEG_ENABLE;
13067         tp->link_config.active_speed = SPEED_INVALID;
13068         tp->link_config.active_duplex = DUPLEX_INVALID;
13069         tp->link_config.phy_is_low_power = 0;
13070         tp->link_config.orig_speed = SPEED_INVALID;
13071         tp->link_config.orig_duplex = DUPLEX_INVALID;
13072         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13073 }
13074
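/* Buffer-manager watermark defaults.  5705-class chips use the smaller
 * _5705 values (with further overrides for the 5906); everything else
 * gets the standard and jumbo defaults.
 */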
13075 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13076 {
13077         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13078                 tp->bufmgr_config.mbuf_read_dma_low_water =
13079                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13080                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13081                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13082                 tp->bufmgr_config.mbuf_high_water =
13083                         DEFAULT_MB_HIGH_WATER_5705;
13084                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13085                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13086                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13087                         tp->bufmgr_config.mbuf_high_water =
13088                                 DEFAULT_MB_HIGH_WATER_5906;
13089                 }
13090
13091                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13092                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13093                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13094                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13095                 tp->bufmgr_config.mbuf_high_water_jumbo =
13096                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13097         } else {
13098                 tp->bufmgr_config.mbuf_read_dma_low_water =
13099                         DEFAULT_MB_RDMA_LOW_WATER;
13100                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13101                         DEFAULT_MB_MACRX_LOW_WATER;
13102                 tp->bufmgr_config.mbuf_high_water =
13103                         DEFAULT_MB_HIGH_WATER;
13104
13105                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13106                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13107                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13108                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13109                 tp->bufmgr_config.mbuf_high_water_jumbo =
13110                         DEFAULT_MB_HIGH_WATER_JUMBO;
13111         }
13112
13113         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13114         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13115 }
13116
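/* Translate the PHY ID into a human-readable name for the probe banner. */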
13117 static char * __devinit tg3_phy_string(struct tg3 *tp)
13118 {
13119         switch (tp->phy_id & PHY_ID_MASK) {
13120         case PHY_ID_BCM5400:    return "5400";
13121         case PHY_ID_BCM5401:    return "5401";
13122         case PHY_ID_BCM5411:    return "5411";
13123         case PHY_ID_BCM5701:    return "5701";
13124         case PHY_ID_BCM5703:    return "5703";
13125         case PHY_ID_BCM5704:    return "5704";
13126         case PHY_ID_BCM5705:    return "5705";
13127         case PHY_ID_BCM5750:    return "5750";
13128         case PHY_ID_BCM5752:    return "5752";
13129         case PHY_ID_BCM5714:    return "5714";
13130         case PHY_ID_BCM5780:    return "5780";
13131         case PHY_ID_BCM5755:    return "5755";
13132         case PHY_ID_BCM5787:    return "5787";
13133         case PHY_ID_BCM5784:    return "5784";
13134         case PHY_ID_BCM5756:    return "5722/5756";
13135         case PHY_ID_BCM5906:    return "5906";
13136         case PHY_ID_BCM5761:    return "5761";
13137         case PHY_ID_BCM8002:    return "8002/serdes";
13138         case 0:                 return "serdes";
13139         default:                return "unknown";
13140         }
13141 }
13142
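/* Build a bus description (PCI / PCI-X / PCI Express, clock speed, bus
 * width) in the caller-supplied buffer for the probe banner.
 */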
13143 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13144 {
13145         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13146                 strcpy(str, "PCI Express");
13147                 return str;
13148         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13149                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13150
13151                 strcpy(str, "PCIX:");
13152
13153                 if ((clock_ctrl == 7) ||
13154                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13155                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13156                         strcat(str, "133MHz");
13157                 else if (clock_ctrl == 0)
13158                         strcat(str, "33MHz");
13159                 else if (clock_ctrl == 2)
13160                         strcat(str, "50MHz");
13161                 else if (clock_ctrl == 4)
13162                         strcat(str, "66MHz");
13163                 else if (clock_ctrl == 6)
13164                         strcat(str, "100MHz");
13165         } else {
13166                 strcpy(str, "PCI:");
13167                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13168                         strcat(str, "66MHz");
13169                 else
13170                         strcat(str, "33MHz");
13171         }
13172         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13173                 strcat(str, ":32-bit");
13174         else
13175                 strcat(str, ":64-bit");
13176         return str;
13177 }
13178
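/* Find the other PCI function of a dual-port device (e.g. 5704) in the
 * same slot; for single-port configurations the device is its own peer.
 */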
13179 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13180 {
13181         struct pci_dev *peer;
13182         unsigned int func, devnr = tp->pdev->devfn & ~7;
13183
13184         for (func = 0; func < 8; func++) {
13185                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13186                 if (peer && peer != tp->pdev)
13187                         break;
13188                 pci_dev_put(peer);
13189         }
13190         /* 5704 can be configured in single-port mode, set peer to
13191          * tp->pdev in that case.
13192          */
13193         if (!peer) {
13194                 peer = tp->pdev;
13195                 return peer;
13196         }
13197
13198         /*
13199          * We don't need to keep the refcount elevated; there's no way
13200          * to remove one half of this device without removing the other.
13201          */
13202         pci_dev_put(peer);
13203
13204         return peer;
13205 }
13206
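/* Default interrupt coalescing parameters, as later reported through
 * ETHTOOL_GCOALESCE (e.g. "ethtool -c <iface>").  Chips with the CLRTICK
 * feature and 5705+ parts use adjusted values.
 */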
13207 static void __devinit tg3_init_coal(struct tg3 *tp)
13208 {
13209         struct ethtool_coalesce *ec = &tp->coal;
13210
13211         memset(ec, 0, sizeof(*ec));
13212         ec->cmd = ETHTOOL_GCOALESCE;
13213         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13214         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13215         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13216         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13217         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13218         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13219         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13220         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13221         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13222
13223         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13224                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13225                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13226                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13227                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13228                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13229         }
13230
13231         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13232                 ec->rx_coalesce_usecs_irq = 0;
13233                 ec->tx_coalesce_usecs_irq = 0;
13234                 ec->stats_block_coalesce_usecs = 0;
13235         }
13236 }
13237
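/* PCI probe entry point: enable and map the device, read chip invariants,
 * choose a DMA mask, run the DMA test and register the net_device.
 */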
13238 static int __devinit tg3_init_one(struct pci_dev *pdev,
13239                                   const struct pci_device_id *ent)
13240 {
13241         static int tg3_version_printed;
13242         resource_size_t tg3reg_base;
13243         unsigned long tg3reg_len;
13244         struct net_device *dev;
13245         struct tg3 *tp;
13246         int err, pm_cap;
13247         char str[40];
13248         u64 dma_mask, persist_dma_mask;
13249         DECLARE_MAC_BUF(mac);
13250
13251         if (tg3_version_printed++ == 0)
13252                 printk(KERN_INFO "%s", version);
13253
13254         err = pci_enable_device(pdev);
13255         if (err) {
13256                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13257                        "aborting.\n");
13258                 return err;
13259         }
13260
13261         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13262                 printk(KERN_ERR PFX "Cannot find proper PCI device "
13263                        "base address, aborting.\n");
13264                 err = -ENODEV;
13265                 goto err_out_disable_pdev;
13266         }
13267
13268         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13269         if (err) {
13270                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13271                        "aborting.\n");
13272                 goto err_out_disable_pdev;
13273         }
13274
13275         pci_set_master(pdev);
13276
13277         /* Find power-management capability. */
13278         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13279         if (pm_cap == 0) {
13280                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13281                        "aborting.\n");
13282                 err = -EIO;
13283                 goto err_out_free_res;
13284         }
13285
13286         tg3reg_base = pci_resource_start(pdev, 0);
13287         tg3reg_len = pci_resource_len(pdev, 0);
13288
13289         dev = alloc_etherdev(sizeof(*tp));
13290         if (!dev) {
13291                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13292                 err = -ENOMEM;
13293                 goto err_out_free_res;
13294         }
13295
13296         SET_NETDEV_DEV(dev, &pdev->dev);
13297
13298 #if TG3_VLAN_TAG_USED
13299         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13300         dev->vlan_rx_register = tg3_vlan_rx_register;
13301 #endif
13302
13303         tp = netdev_priv(dev);
13304         tp->pdev = pdev;
13305         tp->dev = dev;
13306         tp->pm_cap = pm_cap;
13307         tp->rx_mode = TG3_DEF_RX_MODE;
13308         tp->tx_mode = TG3_DEF_TX_MODE;
13309
13310         if (tg3_debug > 0)
13311                 tp->msg_enable = tg3_debug;
13312         else
13313                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13314
13315         /* The word/byte swap controls here control register access byte
13316          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13317          * setting below.
13318          */
13319         tp->misc_host_ctrl =
13320                 MISC_HOST_CTRL_MASK_PCI_INT |
13321                 MISC_HOST_CTRL_WORD_SWAP |
13322                 MISC_HOST_CTRL_INDIR_ACCESS |
13323                 MISC_HOST_CTRL_PCISTATE_RW;
13324
13325         /* The NONFRM (non-frame) byte/word swap controls take effect
13326          * on descriptor entries, anything which isn't packet data.
13327          *
13328          * The StrongARM chips on the board (one for tx, one for rx)
13329          * are running in big-endian mode.
13330          */
13331         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13332                         GRC_MODE_WSWAP_NONFRM_DATA);
13333 #ifdef __BIG_ENDIAN
13334         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13335 #endif
13336         spin_lock_init(&tp->lock);
13337         spin_lock_init(&tp->indirect_lock);
13338         INIT_WORK(&tp->reset_task, tg3_reset_task);
13339
13340         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
13341         if (!tp->regs) {
13342                 printk(KERN_ERR PFX "Cannot map device registers, "
13343                        "aborting.\n");
13344                 err = -ENOMEM;
13345                 goto err_out_free_dev;
13346         }
13347
13348         tg3_init_link_config(tp);
13349
13350         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13351         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13352         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13353
13354         dev->open = tg3_open;
13355         dev->stop = tg3_close;
13356         dev->get_stats = tg3_get_stats;
13357         dev->set_multicast_list = tg3_set_rx_mode;
13358         dev->set_mac_address = tg3_set_mac_addr;
13359         dev->do_ioctl = tg3_ioctl;
13360         dev->tx_timeout = tg3_tx_timeout;
13361         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13362         dev->ethtool_ops = &tg3_ethtool_ops;
13363         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13364         dev->change_mtu = tg3_change_mtu;
13365         dev->irq = pdev->irq;
13366 #ifdef CONFIG_NET_POLL_CONTROLLER
13367         dev->poll_controller = tg3_poll_controller;
13368 #endif
13369
13370         err = tg3_get_invariants(tp);
13371         if (err) {
13372                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13373                        "aborting.\n");
13374                 goto err_out_iounmap;
13375         }
13376
13377         /* The EPB bridge inside 5714, 5715, and 5780 and any
13378          * device behind the EPB cannot support DMA addresses > 40-bit.
13379          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13380          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13381          * do DMA address check in tg3_start_xmit().
13382          */
13383         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13384                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13385         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13386                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13387 #ifdef CONFIG_HIGHMEM
13388                 dma_mask = DMA_64BIT_MASK;
13389 #endif
13390         } else
13391                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13392
13393         /* Configure DMA attributes. */
13394         if (dma_mask > DMA_32BIT_MASK) {
13395                 err = pci_set_dma_mask(pdev, dma_mask);
13396                 if (!err) {
13397                         dev->features |= NETIF_F_HIGHDMA;
13398                         err = pci_set_consistent_dma_mask(pdev,
13399                                                           persist_dma_mask);
13400                         if (err < 0) {
13401                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13402                                        "DMA for consistent allocations\n");
13403                                 goto err_out_iounmap;
13404                         }
13405                 }
13406         }
13407         if (err || dma_mask == DMA_32BIT_MASK) {
13408                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13409                 if (err) {
13410                         printk(KERN_ERR PFX "No usable DMA configuration, "
13411                                "aborting.\n");
13412                         goto err_out_iounmap;
13413                 }
13414         }
13415
13416         tg3_init_bufmgr_config(tp);
13417
13418         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13419                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13420         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13422             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13423             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13424             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13425             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13426                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13427         } else {
13428                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13429         }
13430
13431         /* TSO is on by default on chips that support hardware TSO.
13432          * Firmware TSO on older chips gives lower performance, so it
13433          * is off by default, but can be enabled using ethtool.
13434          */
13435         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13436                 dev->features |= NETIF_F_TSO;
13437                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13438                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13439                         dev->features |= NETIF_F_TSO6;
13440                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13441                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13442                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13443                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13444                         dev->features |= NETIF_F_TSO_ECN;
13445         }
13446
13447
13448         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13449             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13450             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13451                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13452                 tp->rx_pending = 63;
13453         }
13454
13455         err = tg3_get_device_address(tp);
13456         if (err) {
13457                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13458                        "aborting.\n");
13459                 goto err_out_iounmap;
13460         }
13461
13462         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13463                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13464                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13465                                "base address for APE, aborting.\n");
13466                         err = -ENODEV;
13467                         goto err_out_iounmap;
13468                 }
13469
13470                 tg3reg_base = pci_resource_start(pdev, 2);
13471                 tg3reg_len = pci_resource_len(pdev, 2);
13472
13473                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
13474                 if (!tp->aperegs) {
13475                         printk(KERN_ERR PFX "Cannot map APE registers, "
13476                                "aborting.\n");
13477                         err = -ENOMEM;
13478                         goto err_out_iounmap;
13479                 }
13480
13481                 tg3_ape_lock_init(tp);
13482         }
13483
13484         /*
13485          * Reset chip in case UNDI or EFI driver did not shut down the
13486          * DMA engine; otherwise the DMA self test will enable WDMAC and
13487          * we'll see (spurious) pending DMA on the PCI bus at that point.
13488          */
13489         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13490             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13491                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13492                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13493         }
13494
13495         err = tg3_test_dma(tp);
13496         if (err) {
13497                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13498                 goto err_out_apeunmap;
13499         }
13500
13501         /* Tigon3 can offload IP checksums (IPv6 as well on newer chips),
13502          * but some chips have buggy checksumming.
13503          */
13504         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13505                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13506                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13507                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13508                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13509                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13510                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13511                         dev->features |= NETIF_F_IPV6_CSUM;
13512
13513                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13514         } else
13515                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13516
13517         /* flow control autonegotiation is default behavior */
13518         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13519         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13520
13521         tg3_init_coal(tp);
13522
13523         pci_set_drvdata(pdev, dev);
13524
13525         err = register_netdev(dev);
13526         if (err) {
13527                 printk(KERN_ERR PFX "Cannot register net device, "
13528                        "aborting.\n");
13529                 goto err_out_apeunmap;
13530         }
13531
13532         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13533                "(%s) %s Ethernet %s\n",
13534                dev->name,
13535                tp->board_part_number,
13536                tp->pci_chip_rev_id,
13537                tg3_phy_string(tp),
13538                tg3_bus_string(tp, str),
13539                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13540                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13541                  "10/100/1000Base-T")),
13542                print_mac(mac, dev->dev_addr));
13543
13544         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13545                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13546                dev->name,
13547                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13548                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13549                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13550                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13551                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13552                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13553         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13554                dev->name, tp->dma_rwctrl,
13555                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13556                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13557
13558         return 0;
13559
13560 err_out_apeunmap:
13561         if (tp->aperegs) {
13562                 iounmap(tp->aperegs);
13563                 tp->aperegs = NULL;
13564         }
13565
13566 err_out_iounmap:
13567         if (tp->regs) {
13568                 iounmap(tp->regs);
13569                 tp->regs = NULL;
13570         }
13571
13572 err_out_free_dev:
13573         free_netdev(dev);
13574
13575 err_out_free_res:
13576         pci_release_regions(pdev);
13577
13578 err_out_disable_pdev:
13579         pci_disable_device(pdev);
13580         pci_set_drvdata(pdev, NULL);
13581         return err;
13582 }
13583
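/* PCI remove: flush deferred work, tear down the PHY/MDIO layer when
 * phylib is in use, and release everything tg3_init_one() acquired.
 */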
13584 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13585 {
13586         struct net_device *dev = pci_get_drvdata(pdev);
13587
13588         if (dev) {
13589                 struct tg3 *tp = netdev_priv(dev);
13590
13591                 flush_scheduled_work();
13592
13593                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13594                         tg3_phy_fini(tp);
13595                         tg3_mdio_fini(tp);
13596                 }
13597
13598                 unregister_netdev(dev);
13599                 if (tp->aperegs) {
13600                         iounmap(tp->aperegs);
13601                         tp->aperegs = NULL;
13602                 }
13603                 if (tp->regs) {
13604                         iounmap(tp->regs);
13605                         tp->regs = NULL;
13606                 }
13607                 free_netdev(dev);
13608                 pci_release_regions(pdev);
13609                 pci_disable_device(pdev);
13610                 pci_set_drvdata(pdev, NULL);
13611         }
13612 }
13613
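/* Suspend: save PCI state, quiesce the NIC and enter the target power
 * state; if that fails, restart the hardware so the interface stays usable.
 */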
13614 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13615 {
13616         struct net_device *dev = pci_get_drvdata(pdev);
13617         struct tg3 *tp = netdev_priv(dev);
13618         pci_power_t target_state;
13619         int err;
13620
13621         /* PCI register 4 needs to be saved whether netif_running() or not.
13622          * MSI address and data need to be saved if using MSI and
13623          * netif_running().
13624          */
13625         pci_save_state(pdev);
13626
13627         if (!netif_running(dev))
13628                 return 0;
13629
13630         flush_scheduled_work();
13631         tg3_phy_stop(tp);
13632         tg3_netif_stop(tp);
13633
13634         del_timer_sync(&tp->timer);
13635
13636         tg3_full_lock(tp, 1);
13637         tg3_disable_ints(tp);
13638         tg3_full_unlock(tp);
13639
13640         netif_device_detach(dev);
13641
13642         tg3_full_lock(tp, 0);
13643         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13644         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13645         tg3_full_unlock(tp);
13646
13647         target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13648
13649         err = tg3_set_power_state(tp, target_state);
13650         if (err) {
13651                 int err2;
13652
13653                 tg3_full_lock(tp, 0);
13654
13655                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13656                 err2 = tg3_restart_hw(tp, 1);
13657                 if (err2)
13658                         goto out;
13659
13660                 tp->timer.expires = jiffies + tp->timer_offset;
13661                 add_timer(&tp->timer);
13662
13663                 netif_device_attach(dev);
13664                 tg3_netif_start(tp);
13665
13666 out:
13667                 tg3_full_unlock(tp);
13668
13669                 if (!err2)
13670                         tg3_phy_start(tp);
13671         }
13672
13673         return err;
13674 }
13675
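/* Resume: restore PCI state, return to D0 and restart the hardware if the
 * interface was running at suspend time.
 */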
13676 static int tg3_resume(struct pci_dev *pdev)
13677 {
13678         struct net_device *dev = pci_get_drvdata(pdev);
13679         struct tg3 *tp = netdev_priv(dev);
13680         int err;
13681
13682         pci_restore_state(tp->pdev);
13683
13684         if (!netif_running(dev))
13685                 return 0;
13686
13687         err = tg3_set_power_state(tp, PCI_D0);
13688         if (err)
13689                 return err;
13690
13691         netif_device_attach(dev);
13692
13693         tg3_full_lock(tp, 0);
13694
13695         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13696         err = tg3_restart_hw(tp, 1);
13697         if (err)
13698                 goto out;
13699
13700         tp->timer.expires = jiffies + tp->timer_offset;
13701         add_timer(&tp->timer);
13702
13703         tg3_netif_start(tp);
13704
13705 out:
13706         tg3_full_unlock(tp);
13707
13708         if (!err)
13709                 tg3_phy_start(tp);
13710
13711         return err;
13712 }
13713
13714 static struct pci_driver tg3_driver = {
13715         .name           = DRV_MODULE_NAME,
13716         .id_table       = tg3_pci_tbl,
13717         .probe          = tg3_init_one,
13718         .remove         = __devexit_p(tg3_remove_one),
13719         .suspend        = tg3_suspend,
13720         .resume         = tg3_resume
13721 };
13722
13723 static int __init tg3_init(void)
13724 {
13725         return pci_register_driver(&tg3_driver);
13726 }
13727
13728 static void __exit tg3_cleanup(void)
13729 {
13730         pci_unregister_driver(&tg3_driver);
13731 }
13732
13733 module_init(tg3_init);
13734 module_exit(tg3_cleanup);