V4L/DVB (9589): Properly support capture start on em2874
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #include "tg3.h"
67
68 #define DRV_MODULE_NAME         "tg3"
69 #define PFX DRV_MODULE_NAME     ": "
70 #define DRV_MODULE_VERSION      "3.97"
71 #define DRV_MODULE_RELDATE      "December 10, 2008"
72
73 #define TG3_DEF_MAC_MODE        0
74 #define TG3_DEF_RX_MODE         0
75 #define TG3_DEF_TX_MODE         0
76 #define TG3_DEF_MSG_ENABLE        \
77         (NETIF_MSG_DRV          | \
78          NETIF_MSG_PROBE        | \
79          NETIF_MSG_LINK         | \
80          NETIF_MSG_TIMER        | \
81          NETIF_MSG_IFDOWN       | \
82          NETIF_MSG_IFUP         | \
83          NETIF_MSG_RX_ERR       | \
84          NETIF_MSG_TX_ERR)
85
86 /* length of time before we decide the hardware is borked,
87  * and dev->tx_timeout() should be called to fix the problem
88  */
89 #define TG3_TX_TIMEOUT                  (5 * HZ)
90
91 /* hardware minimum and maximum for a single frame's data payload */
92 #define TG3_MIN_MTU                     60
93 #define TG3_MAX_MTU(tp) \
94         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
95
96 /* These numbers seem to be hard coded in the NIC firmware somehow.
97  * You can't change the ring sizes, but you can change where you place
98  * them in the NIC onboard memory.
99  */
100 #define TG3_RX_RING_SIZE                512
101 #define TG3_DEF_RX_RING_PENDING         200
102 #define TG3_RX_JUMBO_RING_SIZE          256
103 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
104
105 /* Do not place this n-ring entries value into the tp struct itself,
106  * we really want to expose these constants to GCC so that modulo et
107  * al.  operations are done with shifts and masks instead of with
108  * hw multiply/modulo instructions.  Another solution would be to
109  * replace things like '% foo' with '& (foo - 1)'.
110  */
111 #define TG3_RX_RCB_RING_SIZE(tp)        \
112         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
113
114 #define TG3_TX_RING_SIZE                512
115 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
116
117 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
118                                  TG3_RX_RING_SIZE)
119 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_JUMBO_RING_SIZE)
121 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
122                                    TG3_RX_RCB_RING_SIZE(tp))
123 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
124                                  TG3_TX_RING_SIZE)
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
132
133 #define TG3_RAW_IP_ALIGN 2
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 #define TG3_NUM_TEST            6
139
/* Version banner printed once at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* message categories to enable; -1 selects the
 * driver default set (TG3_DEF_MSG_ENABLE).
 */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
/* PCI vendor/device IDs this driver binds to.  Mostly Broadcom Tigon3
 * parts, plus SysKonnect, Altima, and Apple boards built on the same
 * silicon.  The table must end with the all-zero sentinel entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* sentinel */
};
227
228 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
229
/* Names reported for ETHTOOL_GSTATS.  The entry order must match the
 * u64 field layout of struct tg3_ethtool_stats (TG3_NUM_STATS is
 * derived from its size), so do not reorder or insert entries without
 * updating that struct in lockstep.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
310
/* Names reported for the ethtool self-test results, in the order the
 * tests are run (TG3_NUM_TEST entries).  "offline" tests require the
 * interface to be taken down.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
321
/* Plain (posted) MMIO write to a BAR0 register. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
326
327 static u32 tg3_read32(struct tg3 *tp, u32 off)
328 {
329         return (readl(tp->regs + off));
330 }
331
/* MMIO write to an APE register (mapped via tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
336
337 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
338 {
339         return (readl(tp->aperegs + off));
340 }
341
/* Write a chip register through the PCI config-space indirect window
 * (address into TG3PCI_REG_BASE_ADDR, data into TG3PCI_REG_DATA).
 * indirect_lock serializes the two-step address/data sequence against
 * concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
351
/* MMIO register write followed by a read-back of the same register,
 * forcing the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);		/* flush the posted write */
}
357
/* Read a chip register through the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
369
/* Write a mailbox register via PCI config space for chips that cannot
 * use direct MMIO mailbox writes.  The RX return-ring consumer and
 * standard producer mailboxes have dedicated config-space aliases;
 * everything else goes through the indirect register window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* 0x5600 relocates the mailbox offset into the indirect register
	 * window; presumably the base of the mailbox region in register
	 * space -- verify against the register map in tg3.h.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
399
/* Read a mailbox register via the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_mbox().  The 0x5600 displacement
 * maps the mailbox offset into the indirect register window (see the
 * write side).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
411
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		/* Read-back flushes the posted write to the device. */
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
436
/* Mailbox write with a read-back to flush it; the read-back is skipped
 * on chips flagged with mailbox write-reordering or the ICH workaround
 * (presumably where the read-back is useless or unsafe -- see the flag
 * definitions in tg3.h).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
444
/* TX mailbox write.  Chips with the TXD mailbox hardware bug get the
 * value written twice; chips that may reorder mailbox writes get a
 * read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* hardware-bug workaround: write twice */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush to enforce write ordering */
}
454
455 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
456 {
457         return (readl(tp->regs + off + GRCMBOX_BASE));
458 }
459
/* On the 5906, mailboxes are written through the GRC mailbox window. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
464
465 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
466 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
467 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
468 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
469 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
470
471 #define tw32(reg,val)           tp->write32(tp, reg, val)
472 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
473 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
474 #define tr32(reg)               tp->read32(tp, reg)
475
/* Write a 32-bit word into NIC-local SRAM through the memory window,
 * using PCI config space when TG3_FLAG_SRAM_USE_CONFIG is set, MMIO
 * otherwise.  On the 5906, writes into the statistics-block range
 * [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC) are silently dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
500
/* Read a 32-bit word from NIC-local SRAM through the memory window;
 * counterpart of tg3_write_mem().  On the 5906, reads from the
 * statistics-block range return 0 instead of touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
527
528 static void tg3_ape_lock_init(struct tg3 *tp)
529 {
530         int i;
531
532         /* Make sure the driver hasn't any stale locks. */
533         for (i = 0; i < 8; i++)
534                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
535                                 APE_LOCK_GRANT_DRIVER);
536 }
537
538 static int tg3_ape_lock(struct tg3 *tp, int locknum)
539 {
540         int i, off;
541         int ret = 0;
542         u32 status;
543
544         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
545                 return 0;
546
547         switch (locknum) {
548                 case TG3_APE_LOCK_GRC:
549                 case TG3_APE_LOCK_MEM:
550                         break;
551                 default:
552                         return -EINVAL;
553         }
554
555         off = 4 * locknum;
556
557         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
558
559         /* Wait for up to 1 millisecond to acquire lock. */
560         for (i = 0; i < 100; i++) {
561                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
562                 if (status == APE_LOCK_GRANT_DRIVER)
563                         break;
564                 udelay(10);
565         }
566
567         if (status != APE_LOCK_GRANT_DRIVER) {
568                 /* Revoke the lock request. */
569                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
570                                 APE_LOCK_GRANT_DRIVER);
571
572                 ret = -EBUSY;
573         }
574
575         return ret;
576 }
577
578 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
579 {
580         int off;
581
582         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
583                 return;
584
585         switch (locknum) {
586                 case TG3_APE_LOCK_GRC:
587                 case TG3_APE_LOCK_MEM:
588                         break;
589                 default:
590                         return;
591         }
592
593         off = 4 * locknum;
594         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
595 }
596
/* Disable chip interrupts: mask PCI interrupt generation in the misc
 * host control register, then write 1 to the interrupt mailbox (the
 * value that signals "interrupts off" -- see tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
603
/* If not using tagged status and the status block shows an unserviced
 * update, force an interrupt via GRC local control; otherwise poke the
 * host coalescing engine (HOSTCC_MODE_NOW) so pending work is noticed.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
613
/* Re-enable chip interrupts: clear irq_sync, unmask PCI interrupts,
 * and acknowledge up to the last processed status tag via the
 * interrupt mailbox.  Finishes with tg3_cond_int() so work that
 * arrived while interrupts were off is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before interrupts fire */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI mode needs the mailbox written a second time --
	 * presumably to re-arm the one-shot interrupt; confirm against
	 * the chip errata/documentation.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
628
629 static inline unsigned int tg3_has_work(struct tg3 *tp)
630 {
631         struct tg3_hw_status *sblk = tp->hw_status;
632         unsigned int work_exists = 0;
633
634         /* check for phy events */
635         if (!(tp->tg3_flags &
636               (TG3_FLAG_USE_LINKCHG_REG |
637                TG3_FLAG_POLL_SERDES))) {
638                 if (sblk->status & SD_STATUS_LINK_CHG)
639                         work_exists = 1;
640         }
641         /* check for RX/TX work to do */
642         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
643             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
644                 work_exists = 1;
645
646         return work_exists;
647 }
648
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before releasing any lock */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
669
/* Quiesce the data path: freshen trans_start so the watchdog does not
 * fire while we are deliberately stopped, then stop NAPI polling and
 * TX queuing.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
676
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, and re-enable interrupts.  Marking the status block
 * updated makes tg3_enable_ints()/tg3_cond_int() force a pass over any
 * work that accumulated while stopped.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
688
/* Switch the chip core clock configuration, stepping through the
 * intermediate ALTCLK settings the hardware requires when changing
 * frequency.  Skipped entirely on CPMU-equipped and 5780-class chips.
 * Each step uses tw32_wait_f with a 40 usec settle time (see the
 * _tw32_flush comment about TG3PCI_CLOCK_CTRL).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down via ALTCLK: 44MHz+ALTCLK first, then ALTCLK
		 * alone, before settling on the final value below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
720
721 #define PHY_BUSY_LOOPS  5000
722
/* Read PHY register @reg through the MAC's MII management interface.
 * On success returns 0 with the register value in *val; returns -EBUSY
 * if the MI transaction does not complete within the polling budget
 * (PHY_BUSY_LOOPS iterations of ~10 usec).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Autopolling would contend with a manual MDIO transaction;
	 * pause it for the duration.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register number, READ command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll MI_COM_BUSY until the transaction completes. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* Re-read to capture the final data bits. */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
771
/* Write @val to PHY register @reg through the MAC's MII management
 * interface.  Returns 0 on success, -EBUSY if the MI transaction does
 * not complete within the polling budget.  On 5906 chips, writes to
 * MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped (returns 0).
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Pause autopolling while we drive the bus manually. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, WRITE command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll MI_COM_BUSY until the transaction completes. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
820
821 static int tg3_bmcr_reset(struct tg3 *tp)
822 {
823         u32 phy_control;
824         int limit, err;
825
826         /* OK, reset it, and poll the BMCR_RESET bit until it
827          * clears or we time out.
828          */
829         phy_control = BMCR_RESET;
830         err = tg3_writephy(tp, MII_BMCR, phy_control);
831         if (err != 0)
832                 return -EBUSY;
833
834         limit = 5000;
835         while (limit--) {
836                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
837                 if (err != 0)
838                         return -EBUSY;
839
840                 if ((phy_control & BMCR_RESET) == 0) {
841                         udelay(40);
842                         break;
843                 }
844                 udelay(10);
845         }
846         if (limit <= 0)
847                 return -EBUSY;
848
849         return 0;
850 }
851
852 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
853 {
854         struct tg3 *tp = (struct tg3 *)bp->priv;
855         u32 val;
856
857         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
858                 return -EAGAIN;
859
860         if (tg3_readphy(tp, reg, &val))
861                 return -EIO;
862
863         return val;
864 }
865
866 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
867 {
868         struct tg3 *tp = (struct tg3 *)bp->priv;
869
870         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
871                 return -EAGAIN;
872
873         if (tg3_writephy(tp, reg, val))
874                 return -EIO;
875
876         return 0;
877 }
878
/* phylib reset hook: the tg3 MDIO bus needs no reset, so just succeed. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
883
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the LED modes and RGMII in-band /
 * out-of-band signalling configuration of the attached PHY.
 * PHY models not listed below are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];
        /* Pick the LED mode bits that suit this PHY model. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM50610:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case TG3_PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                /* Unknown PHY: leave the MAC PHY configuration alone. */
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                /* Non-RGMII interface: set LED modes and make sure the
                 * RGMII interrupt path is disabled.
                 */
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~MAC_PHYCFG1_RGMII_INT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII with standard in-band signalling: also enable the
         * in-band status masks.
         */
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        /* With in-band signalling disabled, optionally enable the
         * external RX-decode / TX-status paths instead.
         */
        val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
                                    MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

        /* Finally, select which RGMII mode signals are driven, depending
         * on whether standard in-band signalling is in use.
         */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
958
/* Un-pause the MDIO bus accessors and disable the MAC's PHY auto-polling
 * so that driver-initiated MDIO transactions can proceed again.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus->mdio_lock);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus->mdio_lock);
        }

        /* Turn off hardware auto-polling of the PHY; the 80us delay lets
         * the MI mode change settle (same delay used throughout the file).
         */
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        /* 5785 routes PHY signals through MAC config registers that must
         * be reprogrammed after the bus is restarted.
         */
        if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
975
976 static void tg3_mdio_stop(struct tg3 *tp)
977 {
978         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
979                 mutex_lock(&tp->mdio_bus->mdio_lock);
980                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
981                 mutex_unlock(&tp->mdio_bus->mdio_lock);
982         }
983 }
984
/* Allocate, configure and register an mdio_bus for phylib-managed
 * configurations.  Returns 0 on success (also when phylib is not in use
 * or the bus is already registered) or a negative errno on failure.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        tg3_mdio_start(tp);

        /* Nothing more to do unless phylib is in use and the bus has not
         * already been registered.
         */
        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);      /* probe our PHY only */
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
                        tp->dev->name, i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (!phydev || !phydev->drv) {
                printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Record the per-model interface mode and hand the RGMII in-band
         * signalling flags down to the PHY driver where applicable.
         */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                break;
        case TG3_PHY_ID_BCM50610:
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case TG3_PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case TG3_PHY_ID_RTL8201E:
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                break;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        /* 5785 needs its MAC-side PHY interface reprogrammed now that the
         * PHY model is known.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1068
1069 static void tg3_mdio_fini(struct tg3 *tp)
1070 {
1071         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1072                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1073                 mdiobus_unregister(tp->mdio_bus);
1074                 mdiobus_free(tp->mdio_bus);
1075                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1076         }
1077 }
1078
1079 /* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        /* Raise the driver-event bit in GRC_RX_CPU_EVENT to signal the
         * firmware, and record when we did so; tg3_wait_for_event_ack()
         * uses the timestamp to bound its polling.
         */
        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1090
1091 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1092
1093 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to
 * acknowledge the previous driver event by clearing the
 * GRC_RX_CPU_DRIVER_EVENT bit.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in 8us steps; +1 guarantees at least one iteration. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1119
1120 /* tp->lock is held. */
/* Report the current link parameters (BMCR/BMSR, advertisement, partner
 * ability, 1000T control/status, PHY address) to the management firmware
 * via the NIC SRAM mailbox.  No-op unless this is an ASF-enabled
 * 5780-class device.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        /* Make sure the firmware has consumed the previous event. */
        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Word 0: BMCR in the high half, BMSR in the low half.
         * PHY-read failures leave the corresponding half zero.
         */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        /* Word 1: local advertisement / link partner ability. */
        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        /* Word 2: 1000BASE-T control/status (copper only). */
        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        /* Word 3: PHY address register. */
        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}
1167
/* Log the current link state (speed/duplex/flow-control) when link
 * messages are enabled, and always forward the state to the management
 * firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}
1195
1196 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1197 {
1198         u16 miireg;
1199
1200         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1201                 miireg = ADVERTISE_PAUSE_CAP;
1202         else if (flow_ctrl & FLOW_CTRL_TX)
1203                 miireg = ADVERTISE_PAUSE_ASYM;
1204         else if (flow_ctrl & FLOW_CTRL_RX)
1205                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1206         else
1207                 miireg = 0;
1208
1209         return miireg;
1210 }
1211
1212 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1213 {
1214         u16 miireg;
1215
1216         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1217                 miireg = ADVERTISE_1000XPAUSE;
1218         else if (flow_ctrl & FLOW_CTRL_TX)
1219                 miireg = ADVERTISE_1000XPSE_ASYM;
1220         else if (flow_ctrl & FLOW_CTRL_RX)
1221                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1222         else
1223                 miireg = 0;
1224
1225         return miireg;
1226 }
1227
1228 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1229 {
1230         u8 cap = 0;
1231
1232         if (lcladv & ADVERTISE_1000XPAUSE) {
1233                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1234                         if (rmtadv & LPA_1000XPAUSE)
1235                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1236                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1237                                 cap = FLOW_CTRL_RX;
1238                 } else {
1239                         if (rmtadv & LPA_1000XPAUSE)
1240                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1241                 }
1242         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1243                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1244                         cap = FLOW_CTRL_TX;
1245         }
1246
1247         return cap;
1248 }
1249
/* Apply the negotiated (or forced) flow-control settings to the MAC's
 * RX/TX mode registers.  @lcladv/@rmtadv are the local and remote pause
 * advertisements used when autoneg resolution is in effect.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        /* With phylib, autoneg state lives in the phy_device. */
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                /* Resolve pause per 802.3: 1000X rules for serdes links,
                 * standard full-duplex MII resolution otherwise.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        /* Only touch the hardware registers when the value changed. */
        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1289
/* phylib link-change callback: mirror the PHY's current speed, duplex
 * and pause state into the MAC registers, then log the change (outside
 * tp->lock) if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        spin_lock(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* Select MII vs GMII port mode from the link speed. */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: resolve pause from our config and
                         * the partner's reported pause capability.
                         */
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        /* 5785: select the MI status mode matching the link speed. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        /* 1000/half needs a larger slot time (0xff vs 32). */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Decide whether anything user-visible changed. */
        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
            linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock(&tp->lock);

        /* Report after dropping tp->lock (report path may sleep/log). */
        if (linkmesg)
                tg3_link_report(tp);
}
1370
/* Connect the MAC to its PHY through phylib and restrict the advertised
 * feature set to what the MAC supports.  Idempotent: returns 0
 * immediately if already connected.  Returns a negative errno on
 * connect failure or an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* 10/100-only hardware: fall back to basic features. */
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                /* Interface mode the MAC cannot drive: undo the connect. */
                phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
                return -EINVAL;
        }

        tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}
1418
/* Start (or resume) the phylib state machine.  If the PHY was put into
 * low-power mode, first restore the link settings saved at that time,
 * then kick off autonegotiation.  No-op if the PHY is not connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (tp->link_config.phy_is_low_power) {
                /* Restore the configuration stashed before power-down. */
                tp->link_config.phy_is_low_power = 0;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}
1440
1441 static void tg3_phy_stop(struct tg3 *tp)
1442 {
1443         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1444                 return;
1445
1446         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1447 }
1448
1449 static void tg3_phy_fini(struct tg3 *tp)
1450 {
1451         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1452                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1453                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1454         }
1455 }
1456
/* Write @val to DSP coefficient @reg: the TG3 PHY exposes its DSP
 * through an address register / data port pair.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1462
/* Enable or disable the PHY's auto power-down (APD) machinery via the
 * MISC shadow registers.  Only applicable to 5705-and-later parts.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                return;

        /* Shadow register SCR5: power/clock related controls. */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_SCR5_SEL |
              MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        /* DLL APD bit is left clear only on 5784 when enabling --
         * presumably a 5784-specific restriction; TODO confirm errata.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


        /* Shadow register APD: wake timer plus the actual enable bit. */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_APD_SEL |
              MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1490
/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs.
 * The 5906 uses an ePHY shadow register; other 5705+ parts use the
 * AUX_CTRL misc shadow.  No-op for serdes or pre-5705 hardware.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* Expose the ePHY shadow registers, flip the MDIX bit,
                 * then restore the original test-register value.
                 */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Read-modify-write the misc shadow via AUX_CTRL. */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
1528
/* Enable the PHY's "ethernet wirespeed" feature unless the board flags
 * say it is unsupported.  The magic 0x7007 selects the relevant AUX_CTRL
 * shadow page; bits 15 and 4 of the value read back are then set and
 * written back -- presumably the wirespeed enable bits (undocumented;
 * TODO confirm against Broadcom PHY docs).
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
1541
/* Distribute the one-time-programmable (OTP) calibration word stored in
 * tp->phy_otp into the PHY's DSP registers.  No-op when no OTP data was
 * read from the chip.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        /* Unpack each OTP field into its corresponding DSP register. */
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1584
1585 static int tg3_wait_macro_done(struct tg3 *tp)
1586 {
1587         int limit = 100;
1588
1589         while (limit--) {
1590                 u32 tmp32;
1591
1592                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1593                         if ((tmp32 & 0x1000) == 0)
1594                                 break;
1595                 }
1596         }
1597         if (limit <= 0)
1598                 return -EBUSY;
1599
1600         return 0;
1601 }
1602
/* Write a fixed test pattern into each of the PHY's four DSP channels
 * and read it back for verification.  On a macro-engine timeout,
 * *resetp is set so the caller knows another PHY reset is needed.
 * Returns 0 when all channels verify, -EBUSY on timeout or mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* One six-word pattern per DSP channel. */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's block and load the pattern
                 * through the DSP read/write port (0x16 is the macro
                 * command register).
                 */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Commit the write and wait for the macro engine. */
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Switch to read-back mode for the same channel. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back word pairs (low/high) and compare against
                 * the pattern, masking to the significant bits.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: issue the recovery sequence
                                 * (magic DSP writes) and report failure.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1668
1669 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1670 {
1671         int chan;
1672
1673         for (chan = 0; chan < 4; chan++) {
1674                 int i;
1675
1676                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1677                              (chan * 0x2000) | 0x0200);
1678                 tg3_writephy(tp, 0x16, 0x0002);
1679                 for (i = 0; i < 6; i++)
1680                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1681                 tg3_writephy(tp, 0x16, 0x0202);
1682                 if (tg3_wait_macro_done(tp))
1683                         return -EBUSY;
1684         }
1685
1686         return 0;
1687 }
1688
1689 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1690 {
1691         u32 reg32, phy9_orig;
1692         int retries, do_phy_reset, err;
1693
1694         retries = 10;
1695         do_phy_reset = 1;
1696         do {
1697                 if (do_phy_reset) {
1698                         err = tg3_bmcr_reset(tp);
1699                         if (err)
1700                                 return err;
1701                         do_phy_reset = 0;
1702                 }
1703
1704                 /* Disable transmitter and interrupt.  */
1705                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1706                         continue;
1707
1708                 reg32 |= 0x3000;
1709                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1710
1711                 /* Set full-duplex, 1000 mbps.  */
1712                 tg3_writephy(tp, MII_BMCR,
1713                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1714
1715                 /* Set to master mode.  */
1716                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1717                         continue;
1718
1719                 tg3_writephy(tp, MII_TG3_CTRL,
1720                              (MII_TG3_CTRL_AS_MASTER |
1721                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1722
1723                 /* Enable SM_DSP_CLOCK and 6dB.  */
1724                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1725
1726                 /* Block the PHY control access.  */
1727                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1728                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1729
1730                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1731                 if (!err)
1732                         break;
1733         } while (--retries);
1734
1735         err = tg3_phy_reset_chanpat(tp);
1736         if (err)
1737                 return err;
1738
1739         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1740         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1741
1742         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1743         tg3_writephy(tp, 0x16, 0x0000);
1744
1745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1747                 /* Set Extended packet length bit for jumbo frames */
1748                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1749         }
1750         else {
1751                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1752         }
1753
1754         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1755
1756         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1757                 reg32 &= ~0x3000;
1758                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1759         } else if (!err)
1760                 err = -EBUSY;
1761
1762         return err;
1763 }
1764
/* Reset the tigon3 PHY and re-apply the chip-specific workarounds
 * that a reset clears.  (Note: despite the historical wording, there
 * is no FORCE argument and no link check -- the reset is unconditional.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the 5906 internal PHY out of IDDQ (low-power)
		 * mode before talking to it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the register latches link-down events, so
	 * the second read reflects current status.  Either read
	 * failing means the PHY is not responding.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report it if we were carrier-up. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the dedicated DSP-testpattern reset
	 * sequence instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX revisions), temporarily lift the CPMU
	 * 10MB-RX-only restriction around the reset and restore it
	 * afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the CPMU control value saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		/* Leave the reduced 12.5MHz MAC clock mode if it is
		 * currently selected.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Post-reset errata fixups.  The DSP address/value pairs below
	 * are Broadcom-errata magic; taken on faith.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice deliberately (5704 A0 erratum). */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1910
/* Configure the GRC local-control GPIOs that manage the board's
 * auxiliary (Vaux) power source.  Only real NICs (TG3_FLG2_IS_NIC)
 * have these GPIOs wired.  On dual-port chips (5704/5714) the peer
 * port's WOL/ASF requirements are honoured too, since the GPIOs are
 * board-level.  The exact GPIO meanings are board-specific; the
 * sequences below follow Broadcom reference code.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Either port needs power while down (WOL or ASF):
		 * drive the GPIOs to keep auxiliary power available.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port already finished init, it
			 * owns the GPIO setup -- leave it alone.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs power while down.  5700/5701 have
		 * nothing to do here; other chips pulse GPIO1.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2021
2022 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2023 {
2024         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2025                 return 1;
2026         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2027                 if (speed != SPEED_10)
2028                         return 1;
2029         } else if (speed == SPEED_10)
2030                 return 1;
2031
2032         return 0;
2033 }
2034
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kind" values passed to the firmware signature helpers. */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

/* Forward declarations for helpers defined later in this file. */
static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);
2045
/* Power down the PHY (or serdes) ahead of a low-power transition.
 * @do_low_power selects the extra aux-control programming (LED off,
 * low-power/isolate bits) used on copper PHYs.  Several chips skip
 * the final BMCR power-down because of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* 5704 serdes: hand autoneg to hardware and
			 * hold it in soft reset.  NOTE(review): bit 15
			 * of MAC_SERDES_CFG is undocumented here;
			 * value taken from reference code.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the MAC clock to 12.5MHz before power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2099
2100 /* tp->lock is held. */
2101 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2102 {
2103         u32 addr_high, addr_low;
2104         int i;
2105
2106         addr_high = ((tp->dev->dev_addr[0] << 8) |
2107                      tp->dev->dev_addr[1]);
2108         addr_low = ((tp->dev->dev_addr[2] << 24) |
2109                     (tp->dev->dev_addr[3] << 16) |
2110                     (tp->dev->dev_addr[4] <<  8) |
2111                     (tp->dev->dev_addr[5] <<  0));
2112         for (i = 0; i < 4; i++) {
2113                 if (i == 1 && skip_mac_1)
2114                         continue;
2115                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2116                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2117         }
2118
2119         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2120             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2121                 for (i = 0; i < 12; i++) {
2122                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2123                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2124                 }
2125         }
2126
2127         addr_high = (tp->dev->dev_addr[0] +
2128                      tp->dev->dev_addr[1] +
2129                      tp->dev->dev_addr[2] +
2130                      tp->dev->dev_addr[3] +
2131                      tp->dev->dev_addr[4] +
2132                      tp->dev->dev_addr[5]) &
2133                 TX_BACKOFF_SEED_MASK;
2134         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2135 }
2136
2137 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2138 {
2139         u32 misc_host_ctrl;
2140         bool device_should_wake, do_low_power;
2141
2142         /* Make sure register accesses (indirect or otherwise)
2143          * will function correctly.
2144          */
2145         pci_write_config_dword(tp->pdev,
2146                                TG3PCI_MISC_HOST_CTRL,
2147                                tp->misc_host_ctrl);
2148
2149         switch (state) {
2150         case PCI_D0:
2151                 pci_enable_wake(tp->pdev, state, false);
2152                 pci_set_power_state(tp->pdev, PCI_D0);
2153
2154                 /* Switch out of Vaux if it is a NIC */
2155                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2156                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2157
2158                 return 0;
2159
2160         case PCI_D1:
2161         case PCI_D2:
2162         case PCI_D3hot:
2163                 break;
2164
2165         default:
2166                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2167                         tp->dev->name, state);
2168                 return -EINVAL;
2169         }
2170
2171         /* Restore the CLKREQ setting. */
2172         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2173                 u16 lnkctl;
2174
2175                 pci_read_config_word(tp->pdev,
2176                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2177                                      &lnkctl);
2178                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2179                 pci_write_config_word(tp->pdev,
2180                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2181                                       lnkctl);
2182         }
2183
2184         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2185         tw32(TG3PCI_MISC_HOST_CTRL,
2186              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2187
2188         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2189                              device_may_wakeup(&tp->pdev->dev) &&
2190                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2191
2192         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2193                 do_low_power = false;
2194                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2195                     !tp->link_config.phy_is_low_power) {
2196                         struct phy_device *phydev;
2197                         u32 phyid, advertising;
2198
2199                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2200
2201                         tp->link_config.phy_is_low_power = 1;
2202
2203                         tp->link_config.orig_speed = phydev->speed;
2204                         tp->link_config.orig_duplex = phydev->duplex;
2205                         tp->link_config.orig_autoneg = phydev->autoneg;
2206                         tp->link_config.orig_advertising = phydev->advertising;
2207
2208                         advertising = ADVERTISED_TP |
2209                                       ADVERTISED_Pause |
2210                                       ADVERTISED_Autoneg |
2211                                       ADVERTISED_10baseT_Half;
2212
2213                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2214                             device_should_wake) {
2215                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2216                                         advertising |=
2217                                                 ADVERTISED_100baseT_Half |
2218                                                 ADVERTISED_100baseT_Full |
2219                                                 ADVERTISED_10baseT_Full;
2220                                 else
2221                                         advertising |= ADVERTISED_10baseT_Full;
2222                         }
2223
2224                         phydev->advertising = advertising;
2225
2226                         phy_start_aneg(phydev);
2227
2228                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2229                         if (phyid != TG3_PHY_ID_BCMAC131) {
2230                                 phyid &= TG3_PHY_OUI_MASK;
2231                                 if (phyid == TG3_PHY_OUI_1 &&
2232                                     phyid == TG3_PHY_OUI_2 &&
2233                                     phyid == TG3_PHY_OUI_3)
2234                                         do_low_power = true;
2235                         }
2236                 }
2237         } else {
2238                 do_low_power = true;
2239
2240                 if (tp->link_config.phy_is_low_power == 0) {
2241                         tp->link_config.phy_is_low_power = 1;
2242                         tp->link_config.orig_speed = tp->link_config.speed;
2243                         tp->link_config.orig_duplex = tp->link_config.duplex;
2244                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2245                 }
2246
2247                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2248                         tp->link_config.speed = SPEED_10;
2249                         tp->link_config.duplex = DUPLEX_HALF;
2250                         tp->link_config.autoneg = AUTONEG_ENABLE;
2251                         tg3_setup_phy(tp, 0);
2252                 }
2253         }
2254
2255         __tg3_set_mac_addr(tp, 0);
2256
2257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2258                 u32 val;
2259
2260                 val = tr32(GRC_VCPU_EXT_CTRL);
2261                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2262         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2263                 int i;
2264                 u32 val;
2265
2266                 for (i = 0; i < 200; i++) {
2267                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2268                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2269                                 break;
2270                         msleep(1);
2271                 }
2272         }
2273         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2274                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2275                                                      WOL_DRV_STATE_SHUTDOWN |
2276                                                      WOL_DRV_WOL |
2277                                                      WOL_SET_MAGIC_PKT);
2278
2279         if (device_should_wake) {
2280                 u32 mac_mode;
2281
2282                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2283                         if (do_low_power) {
2284                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2285                                 udelay(40);
2286                         }
2287
2288                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2289                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2290                         else
2291                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2292
2293                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2294                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2295                             ASIC_REV_5700) {
2296                                 u32 speed = (tp->tg3_flags &
2297                                              TG3_FLAG_WOL_SPEED_100MB) ?
2298                                              SPEED_100 : SPEED_10;
2299                                 if (tg3_5700_link_polarity(tp, speed))
2300                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2301                                 else
2302                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2303                         }
2304                 } else {
2305                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2306                 }
2307
2308                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2309                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2310
2311                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2312                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2313                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2314                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2315                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2316                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2317
2318                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2319                         mac_mode |= tp->mac_mode &
2320                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2321                         if (mac_mode & MAC_MODE_APE_TX_EN)
2322                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2323                 }
2324
2325                 tw32_f(MAC_MODE, mac_mode);
2326                 udelay(100);
2327
2328                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2329                 udelay(10);
2330         }
2331
2332         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2333             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2334              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2335                 u32 base_val;
2336
2337                 base_val = tp->pci_clock_ctrl;
2338                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2339                              CLOCK_CTRL_TXCLK_DISABLE);
2340
2341                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2342                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2343         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2344                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2345                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2346                 /* do nothing */
2347         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2348                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2349                 u32 newbits1, newbits2;
2350
2351                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2352                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2353                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2354                                     CLOCK_CTRL_TXCLK_DISABLE |
2355                                     CLOCK_CTRL_ALTCLK);
2356                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2357                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2358                         newbits1 = CLOCK_CTRL_625_CORE;
2359                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2360                 } else {
2361                         newbits1 = CLOCK_CTRL_ALTCLK;
2362                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2363                 }
2364
2365                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2366                             40);
2367
2368                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2369                             40);
2370
2371                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2372                         u32 newbits3;
2373
2374                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2375                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2376                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2377                                             CLOCK_CTRL_TXCLK_DISABLE |
2378                                             CLOCK_CTRL_44MHZ_CORE);
2379                         } else {
2380                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2381                         }
2382
2383                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2384                                     tp->pci_clock_ctrl | newbits3, 40);
2385                 }
2386         }
2387
2388         if (!(device_should_wake) &&
2389             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2390                 tg3_power_down_phy(tp, do_low_power);
2391
2392         tg3_frob_aux_power(tp);
2393
2394         /* Workaround for unstable PLL clock */
2395         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2396             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2397                 u32 val = tr32(0x7d00);
2398
2399                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2400                 tw32(0x7d00, val);
2401                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2402                         int err;
2403
2404                         err = tg3_nvram_lock(tp);
2405                         tg3_halt_cpu(tp, RX_CPU_BASE);
2406                         if (!err)
2407                                 tg3_nvram_unlock(tp);
2408                 }
2409         }
2410
2411         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2412
2413         if (device_should_wake)
2414                 pci_enable_wake(tp->pdev, state, true);
2415
2416         /* Finally, set the new power state. */
2417         pci_set_power_state(tp->pdev, state);
2418
2419         return 0;
2420 }
2421
2422 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2423 {
2424         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2425         case MII_TG3_AUX_STAT_10HALF:
2426                 *speed = SPEED_10;
2427                 *duplex = DUPLEX_HALF;
2428                 break;
2429
2430         case MII_TG3_AUX_STAT_10FULL:
2431                 *speed = SPEED_10;
2432                 *duplex = DUPLEX_FULL;
2433                 break;
2434
2435         case MII_TG3_AUX_STAT_100HALF:
2436                 *speed = SPEED_100;
2437                 *duplex = DUPLEX_HALF;
2438                 break;
2439
2440         case MII_TG3_AUX_STAT_100FULL:
2441                 *speed = SPEED_100;
2442                 *duplex = DUPLEX_FULL;
2443                 break;
2444
2445         case MII_TG3_AUX_STAT_1000HALF:
2446                 *speed = SPEED_1000;
2447                 *duplex = DUPLEX_HALF;
2448                 break;
2449
2450         case MII_TG3_AUX_STAT_1000FULL:
2451                 *speed = SPEED_1000;
2452                 *duplex = DUPLEX_FULL;
2453                 break;
2454
2455         default:
2456                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2457                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2458                                  SPEED_10;
2459                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2460                                   DUPLEX_HALF;
2461                         break;
2462                 }
2463                 *speed = SPEED_INVALID;
2464                 *duplex = DUPLEX_INVALID;
2465                 break;
2466         }
2467 }
2468
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config, then either force the configured speed/duplex or
 * (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when Wake-on-LAN needs a 100Mb link. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise whatever link_config allows. */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 additionally advertise as gigabit master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* 5701 A0/B0 additionally advertise as gigabit master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement in 10/100 forced mode. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Isolate the PHY via loopback and wait up to
			 * ~15ms for the link to drop before writing the
			 * new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* Read BMSR twice; bail out of the wait
				 * only on two successful reads.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg requested: kick off a (re)negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2606
2607 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2608 {
2609         int err;
2610
2611         /* Turn off tap power management. */
2612         /* Set Extended packet length bit */
2613         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2614
2615         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2616         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2617
2618         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2619         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2620
2621         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2622         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2623
2624         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2625         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2626
2627         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2628         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2629
2630         udelay(40);
2631
2632         return err;
2633 }
2634
2635 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2636 {
2637         u32 adv_reg, all_mask = 0;
2638
2639         if (mask & ADVERTISED_10baseT_Half)
2640                 all_mask |= ADVERTISE_10HALF;
2641         if (mask & ADVERTISED_10baseT_Full)
2642                 all_mask |= ADVERTISE_10FULL;
2643         if (mask & ADVERTISED_100baseT_Half)
2644                 all_mask |= ADVERTISE_100HALF;
2645         if (mask & ADVERTISED_100baseT_Full)
2646                 all_mask |= ADVERTISE_100FULL;
2647
2648         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2649                 return 0;
2650
2651         if ((adv_reg & all_mask) != all_mask)
2652                 return 0;
2653         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2654                 u32 tg3_ctrl;
2655
2656                 all_mask = 0;
2657                 if (mask & ADVERTISED_1000baseT_Half)
2658                         all_mask |= ADVERTISE_1000HALF;
2659                 if (mask & ADVERTISED_1000baseT_Full)
2660                         all_mask |= ADVERTISE_1000FULL;
2661
2662                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2663                         return 0;
2664
2665                 if ((tg3_ctrl & all_mask) != all_mask)
2666                         return 0;
2667         }
2668         return 1;
2669 }
2670
/* Check whether the advertised pause bits match what
 * tp->link_config.flowctrl requests.  Fills *lcladv with the local
 * advertisement register and, on a full-duplex link with autoneg'd
 * pause, *rmtadv with the link partner's ability register.  Returns 1
 * when the link may be considered up with respect to flow control,
 * 0 when the full-duplex advertisement does not match the request.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	/* If the advertisement register cannot be read, report ok. */
	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* On a full-duplex link the advertised pause bits must
		 * already match the requested configuration.
		 */
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
2703
/* Establish or re-evaluate the link on a copper PHY.  Reads link
 * state from the PHY, applies several chip-specific workarounds,
 * programs MAC_MODE to match the negotiated speed/duplex, and updates
 * the net_device carrier state.  Returns 0 on success or a negative
 * error from the 5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we access the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice to get current (unlatched) state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: (re)initialize the 5401 DSP and wait
			 * up to ~10ms for the link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 needs a full PHY reset + DSP reinit if a
			 * gigabit link did not come back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask either just link-change, or all PHY interrupts. */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		/* If bit 10 was not yet set, set it and skip straight to
		 * reprogramming the PHY below.
		 */
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to ~4ms for link up (double-read of BMSR). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status word, then decode it. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable BMCR value (neither 0 nor 0x7fff). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link counts only if the PHY is
			 * advertising everything we asked for and the
			 * pause bits match the requested flow control.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link counts only at the exact
			 * forced speed/duplex/flowctrl settings.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY and check once more for link. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select MII vs GMII port mode to match the link speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Write the firmware mailbox magic when a gigabit link comes up
	 * on a 5700 in PCI-X / high-speed PCI mode.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		/* Disable PCIe CLKREQ on 10/100 links, enable otherwise. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Propagate carrier changes to the stack and log them. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2980
/* Software state for the fiber autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and link-partner status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters; cur_time advances once per state-machine call
	 * and link_time records when a state was entered.
	 */
	unsigned long link_time, cur_time;

	/* Last config word received and how many times in a row it has
	 * been seen unchanged.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* transmitted/received ANEG_CFG_* words */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must settle before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
3044
3045 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3046                                    struct tg3_fiber_aneginfo *ap)
3047 {
3048         u16 flowctrl;
3049         unsigned long delta;
3050         u32 rx_cfg_reg;
3051         int ret;
3052
3053         if (ap->state == ANEG_STATE_UNKNOWN) {
3054                 ap->rxconfig = 0;
3055                 ap->link_time = 0;
3056                 ap->cur_time = 0;
3057                 ap->ability_match_cfg = 0;
3058                 ap->ability_match_count = 0;
3059                 ap->ability_match = 0;
3060                 ap->idle_match = 0;
3061                 ap->ack_match = 0;
3062         }
3063         ap->cur_time++;
3064
3065         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3066                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3067
3068                 if (rx_cfg_reg != ap->ability_match_cfg) {
3069                         ap->ability_match_cfg = rx_cfg_reg;
3070                         ap->ability_match = 0;
3071                         ap->ability_match_count = 0;
3072                 } else {
3073                         if (++ap->ability_match_count > 1) {
3074                                 ap->ability_match = 1;
3075                                 ap->ability_match_cfg = rx_cfg_reg;
3076                         }
3077                 }
3078                 if (rx_cfg_reg & ANEG_CFG_ACK)
3079                         ap->ack_match = 1;
3080                 else
3081                         ap->ack_match = 0;
3082
3083                 ap->idle_match = 0;
3084         } else {
3085                 ap->idle_match = 1;
3086                 ap->ability_match_cfg = 0;
3087                 ap->ability_match_count = 0;
3088                 ap->ability_match = 0;
3089                 ap->ack_match = 0;
3090
3091                 rx_cfg_reg = 0;
3092         }
3093
3094         ap->rxconfig = rx_cfg_reg;
3095         ret = ANEG_OK;
3096
3097         switch(ap->state) {
3098         case ANEG_STATE_UNKNOWN:
3099                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3100                         ap->state = ANEG_STATE_AN_ENABLE;
3101
3102                 /* fallthru */
3103         case ANEG_STATE_AN_ENABLE:
3104                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3105                 if (ap->flags & MR_AN_ENABLE) {
3106                         ap->link_time = 0;
3107                         ap->cur_time = 0;
3108                         ap->ability_match_cfg = 0;
3109                         ap->ability_match_count = 0;
3110                         ap->ability_match = 0;
3111                         ap->idle_match = 0;
3112                         ap->ack_match = 0;
3113
3114                         ap->state = ANEG_STATE_RESTART_INIT;
3115                 } else {
3116                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3117                 }
3118                 break;
3119
3120         case ANEG_STATE_RESTART_INIT:
3121                 ap->link_time = ap->cur_time;
3122                 ap->flags &= ~(MR_NP_LOADED);
3123                 ap->txconfig = 0;
3124                 tw32(MAC_TX_AUTO_NEG, 0);
3125                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3126                 tw32_f(MAC_MODE, tp->mac_mode);
3127                 udelay(40);
3128
3129                 ret = ANEG_TIMER_ENAB;
3130                 ap->state = ANEG_STATE_RESTART;
3131
3132                 /* fallthru */
3133         case ANEG_STATE_RESTART:
3134                 delta = ap->cur_time - ap->link_time;
3135                 if (delta > ANEG_STATE_SETTLE_TIME) {
3136                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3137                 } else {
3138                         ret = ANEG_TIMER_ENAB;
3139                 }
3140                 break;
3141
3142         case ANEG_STATE_DISABLE_LINK_OK:
3143                 ret = ANEG_DONE;
3144                 break;
3145
3146         case ANEG_STATE_ABILITY_DETECT_INIT:
3147                 ap->flags &= ~(MR_TOGGLE_TX);
3148                 ap->txconfig = ANEG_CFG_FD;
3149                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3150                 if (flowctrl & ADVERTISE_1000XPAUSE)
3151                         ap->txconfig |= ANEG_CFG_PS1;
3152                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3153                         ap->txconfig |= ANEG_CFG_PS2;
3154                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3155                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3156                 tw32_f(MAC_MODE, tp->mac_mode);
3157                 udelay(40);
3158
3159                 ap->state = ANEG_STATE_ABILITY_DETECT;
3160                 break;
3161
3162         case ANEG_STATE_ABILITY_DETECT:
3163                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3164                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3165                 }
3166                 break;
3167
3168         case ANEG_STATE_ACK_DETECT_INIT:
3169                 ap->txconfig |= ANEG_CFG_ACK;
3170                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3171                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3172                 tw32_f(MAC_MODE, tp->mac_mode);
3173                 udelay(40);
3174
3175                 ap->state = ANEG_STATE_ACK_DETECT;
3176
3177                 /* fallthru */
3178         case ANEG_STATE_ACK_DETECT:
3179                 if (ap->ack_match != 0) {
3180                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3181                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3182                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3183                         } else {
3184                                 ap->state = ANEG_STATE_AN_ENABLE;
3185                         }
3186                 } else if (ap->ability_match != 0 &&
3187                            ap->rxconfig == 0) {
3188                         ap->state = ANEG_STATE_AN_ENABLE;
3189                 }
3190                 break;
3191
3192         case ANEG_STATE_COMPLETE_ACK_INIT:
3193                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3194                         ret = ANEG_FAILED;
3195                         break;
3196                 }
3197                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3198                                MR_LP_ADV_HALF_DUPLEX |
3199                                MR_LP_ADV_SYM_PAUSE |
3200                                MR_LP_ADV_ASYM_PAUSE |
3201                                MR_LP_ADV_REMOTE_FAULT1 |
3202                                MR_LP_ADV_REMOTE_FAULT2 |
3203                                MR_LP_ADV_NEXT_PAGE |
3204                                MR_TOGGLE_RX |
3205                                MR_NP_RX);
3206                 if (ap->rxconfig & ANEG_CFG_FD)
3207                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3208                 if (ap->rxconfig & ANEG_CFG_HD)
3209                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3210                 if (ap->rxconfig & ANEG_CFG_PS1)
3211                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3212                 if (ap->rxconfig & ANEG_CFG_PS2)
3213                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3214                 if (ap->rxconfig & ANEG_CFG_RF1)
3215                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3216                 if (ap->rxconfig & ANEG_CFG_RF2)
3217                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3218                 if (ap->rxconfig & ANEG_CFG_NP)
3219                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3220
3221                 ap->link_time = ap->cur_time;
3222
3223                 ap->flags ^= (MR_TOGGLE_TX);
3224                 if (ap->rxconfig & 0x0008)
3225                         ap->flags |= MR_TOGGLE_RX;
3226                 if (ap->rxconfig & ANEG_CFG_NP)
3227                         ap->flags |= MR_NP_RX;
3228                 ap->flags |= MR_PAGE_RX;
3229
3230                 ap->state = ANEG_STATE_COMPLETE_ACK;
3231                 ret = ANEG_TIMER_ENAB;
3232                 break;
3233
3234         case ANEG_STATE_COMPLETE_ACK:
3235                 if (ap->ability_match != 0 &&
3236                     ap->rxconfig == 0) {
3237                         ap->state = ANEG_STATE_AN_ENABLE;
3238                         break;
3239                 }
3240                 delta = ap->cur_time - ap->link_time;
3241                 if (delta > ANEG_STATE_SETTLE_TIME) {
3242                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3243                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3244                         } else {
3245                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3246                                     !(ap->flags & MR_NP_RX)) {
3247                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3248                                 } else {
3249                                         ret = ANEG_FAILED;
3250                                 }
3251                         }
3252                 }
3253                 break;
3254
3255         case ANEG_STATE_IDLE_DETECT_INIT:
3256                 ap->link_time = ap->cur_time;
3257                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3258                 tw32_f(MAC_MODE, tp->mac_mode);
3259                 udelay(40);
3260
3261                 ap->state = ANEG_STATE_IDLE_DETECT;
3262                 ret = ANEG_TIMER_ENAB;
3263                 break;
3264
3265         case ANEG_STATE_IDLE_DETECT:
3266                 if (ap->ability_match != 0 &&
3267                     ap->rxconfig == 0) {
3268                         ap->state = ANEG_STATE_AN_ENABLE;
3269                         break;
3270                 }
3271                 delta = ap->cur_time - ap->link_time;
3272                 if (delta > ANEG_STATE_SETTLE_TIME) {
3273                         /* XXX another gem from the Broadcom driver :( */
3274                         ap->state = ANEG_STATE_LINK_OK;
3275                 }
3276                 break;
3277
3278         case ANEG_STATE_LINK_OK:
3279                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3280                 ret = ANEG_DONE;
3281                 break;
3282
3283         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3284                 /* ??? unimplemented */
3285                 break;
3286
3287         case ANEG_STATE_NEXT_PAGE_WAIT:
3288                 /* ??? unimplemented */
3289                 break;
3290
3291         default:
3292                 ret = ANEG_FAILED;
3293                 break;
3294         }
3295
3296         return ret;
3297 }
3298
/* Run IEEE 802.3 clause-37 style 1000BASE-X autonegotiation in software.
 *
 * The MAC is forced into GMII port mode and told to transmit config
 * (/C/) ordered sets, then tg3_fiber_aneg_smachine() is single-stepped
 * roughly once per microsecond (up to ~195 ms) until it reports
 * ANEG_DONE or ANEG_FAILED.  Register write order and the udelay()s
 * are part of the hardware handshake — do not reorder.
 *
 * @txflags: out — the ANEG_CFG_* word that was advertised locally.
 * @rxflags: out — the MR_* flag word accumulated by the state machine
 *           (link partner abilities, completion status, etc.).
 *
 * Returns 1 when the state machine finished (ANEG_DONE) and at least
 * one of MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set;
 * 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Stop transmitting any stale config word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode first, then enable sending of config
	 * codes for the duration of the negotiation.
	 */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* ~195000 iterations of ~1 us each; the state machine's own
	 * timers are driven by the cur_time counter it increments per
	 * call.
	 */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Negotiation finished (or timed out): stop sending configs. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
3343
/* Initialize the BCM8002 SerDes PHY.
 *
 * The raw register numbers and values below are undocumented Broadcom
 * magic (presumably lifted from the vendor driver); the sequence and
 * the busy-wait delays are part of the init handshake — do not
 * reorder.  Runs either on first bring-up or when PCS sync has been
 * lost after init completed.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3393
/* Fibre link setup for parts where autonegotiation is performed in
 * hardware by the SG_DIG block (5704S-class devices).
 *
 * @mac_status: MAC_STATUS value sampled by the caller (may be
 *              re-sampled internally after register pokes).
 *
 * Returns 1 when a usable link was established — via completed HW
 * autoneg, via parallel detection, or forced when autoneg is
 * disabled — otherwise 0.  Also maintains tp->serdes_counter and the
 * TG3_FLG2_PARALLEL_DETECT flag across calls.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs other than 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes applied below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active,
		 * then report link up purely on PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* 0xc010000 / 0x4010000 are undocumented
				 * per-port magic — presumably from the
				 * vendor driver.
				 */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control word. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Control word is stale.  If we are currently riding a
		 * parallel-detected link (PCS synced, no config codes
		 * received), keep it alive while the timeout runs down
		 * instead of restarting autoneg immediately.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the new control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* HW autoneg finished: decode the pause bits
			 * from both sides and resolve flow control.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg still pending: let the timeout run
			 * down, then try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: re-arm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3535
/* Fibre link setup for parts without the SG_DIG hardware autoneg
 * block: either run the software clause-37 state machine via
 * fiber_autoneg(), or force a 1000-FD link when autoneg is disabled.
 *
 * @mac_status: MAC_STATUS value sampled by the caller; requires
 *              MAC_STATUS_PCS_SYNCED to proceed at all.
 *
 * Returns 1 if the link is considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			/* Translate the negotiated ANEG_CFG_* / MR_*
			 * pause bits into MII-style advertisement words
			 * for flow-control resolution.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until the status
		 * settles (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but PCS is synced and no config
		 * codes are coming in: treat as link up anyway
		 * (parallel-detection style).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS so the partner sees us, then
		 * return to normal mode.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3597
/* Top-level link setup for TBI/fibre devices.
 *
 * Puts the MAC into TBI port mode, runs BCM8002 PHY init when that
 * PHY is present, then delegates to tg3_setup_fiber_hw_autoneg() or
 * tg3_setup_fiber_by_hand() depending on TG3_FLG2_HW_AUTONEG.
 * Afterwards it acks pending link-change status bits, programs the
 * link LED, updates netif carrier state and reports link changes.
 *
 * @force_reset: accepted for signature parity with the other
 *               tg3_setup_*_phy() variants; not used in this body.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we only report when
	 * something actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: non-HW-autoneg device with a healthy, already
	 * established link — just ack the change bits and bail.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change bits until they stay clear (bounded at 100). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to kick the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fibre is always 1000-FD when up; drive the LED accordingly. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier state and report any change in link
	 * parameters.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3705
/* Link setup for fibre devices reached through an MII-style SerDes
 * PHY (e.g. 5714S-class parts), using standard BMCR/BMSR/ADVERTISE
 * registers over tg3_readphy()/tg3_writephy().
 *
 * Handles three cases: parallel-detect in progress (leave the PHY
 * alone, just sample link state), autoneg enabled (refresh the
 * 1000BASE-X advertisement and restart autoneg if it changed), and
 * forced mode (rewrite BMCR, forcing a link drop first if needed).
 *
 * @force_reset: when set, reset the PHY before configuring.
 *
 * Returns the accumulated error status from the PHY accesses (0 on
 * success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the
	 * current value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714 the MAC's TX status is the authoritative link
	 * indication; override the BMSR bit with it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured duplex abilities and pause settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed (or autoneg was off): write it
		 * out, restart autoneg, and return early — link state
		 * is evaluated on a later poll.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: fibre is 1000 Mb/s only, so just set
		 * the duplex bit as configured.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low: read BMSR twice for fresh state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the abilities both sides
			 * advertised; no common ability means the link
			 * is unusable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this reads the PREVIOUS active_duplex — the
	 * new value is only stored below.  Looks like the half-duplex
	 * MAC bit lags one call behind; confirm whether intentional.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3875
/* Work around link partners that do not autonegotiate on serdes links
 * (parallel detection).
 *
 * If autoneg is enabled but the link is down, probe the PHY: signal
 * detect present (shadow reg bit 0x10) while no config code words are
 * being received (expansion reg bit 0x20 clear) means the partner is
 * forcing the link, so force 1000/FULL ourselves and set
 * TG3_FLG2_PARALLEL_DETECT.  If a parallel-detected link later starts
 * receiving config code words, re-enable autoneg and clear the flag.
 *
 * Called periodically; tp->serdes_counter defers the probe to give
 * autonegotiation time to finish first.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }
        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, 0x1c, 0x7c00);
                        tg3_readphy(tp, 0x1c, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, 0x17, 0x0f01);
                        /* NOTE(review): read twice — presumably the first
                         * read returns latched state; confirm against the
                         * PHY datasheet.
                         */
                        tg3_readphy(tp, 0x15, &phy2);
                        tg3_readphy(tp, 0x15, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
                        }
                }
        }
        else if (netif_carrier_ok(tp->dev) &&
                 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, 0x17, 0x0f01);
                tg3_readphy(tp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                }
        }
}
3933
/* (Re)configure the link via the PHY-type specific handler, then apply
 * link-state dependent MAC/chip settings.  Returns the error code from
 * the PHY setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
        int err;

        /* Dispatch on PHY type: SERDES fiber, MII-based serdes, or
         * plain copper. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                err = tg3_setup_fiber_phy(tp, force_reset);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        } else {
                err = tg3_setup_copper_phy(tp, force_reset);
        }

        /* 5784 A-x: reprogram the GRC prescaler to match the MAC clock
         * currently reported by the CPMU. */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
                u32 val, scale;

                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
                if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
                        scale = 65;
                else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
                        scale = 6;
                else
                        scale = 12;

                val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
                val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
                tw32(GRC_MISC_CFG, val);
        }

        /* 1000/half gets a larger slot time (0xff vs 32); the IPG
         * fields are identical in both cases. */
        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Pre-5705 chips: collect statistics only while the link is up. */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
        }

        /* ASPM workaround: program the configured L1 entry threshold
         * while the link is down; saturate the threshold field while
         * the link is up. */
        if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
                u32 val = tr32(PCIE_PWR_MGMT_THRESH);
                if (!netif_carrier_ok(tp->dev))
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
                else
                        val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
                tw32(PCIE_PWR_MGMT_THRESH, val);
        }

        return err;
}
3995
3996 /* This is called whenever we suspect that the system chipset is re-
3997  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3998  * is bogus tx completions. We try to recover by setting the
3999  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4000  * in the workqueue.
4001  */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* Should never fire when the write-reorder workaround is
         * already active or we are using indirect mailbox writes. */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* Set the recovery flag under tp->lock; the reset is performed
         * later in the workqueue (see comment above this function). */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
4016
4017 static inline u32 tg3_tx_avail(struct tg3 *tp)
4018 {
4019         smp_mb();
4020         return (tp->tx_pending -
4021                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4022 }
4023
4024 /* Tigon3 never reports partial packet sends.  So we do not
4025  * need special logic to handle SKBs that have not had all
4026  * of their frags sent yet, like SunGEM does.
4027  */
/* Reclaim completed TX descriptors up to the hardware consumer index,
 * unmap and free their skbs, and wake the queue if enough space has
 * opened up.  Calls tg3_tx_recover() if the ring bookkeeping looks
 * corrupted (symptom of MMIO write reordering, see tg3_tx_recover).
 */
static void tg3_tx(struct tg3 *tp)
{
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A NULL skb at the consumer index means the completion
                 * index is bogus -- attempt recovery. */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Skip over the fragment slots; they must be empty and
                 * must not run past hw_idx, otherwise the ring state is
                 * inconsistent. */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;
                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                /* Re-check under the TX lock to avoid racing with a
                 * concurrent tg3_start_xmit() stopping the queue. */
                netif_tx_lock(tp->dev);
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
4082
4083 /* Returns size of skb allocated or < 0 on error.
4084  *
4085  * We only need to fill in the address because the other members
4086  * of the RX descriptor are invariant, see tg3_init_rings.
4087  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4089  * posting buffers we only dirty the first cache line of the RX
4090  * descriptor (containing the address).  Whereas for the RX status
4091  * buffers the cpu only reads the last cacheline of the RX descriptor
4092  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4093  */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
                            int src_idx, u32 dest_idx_unmasked)
{
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map, *src_map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;

        /* Pick the ring-specific descriptor, bookkeeping slot and
         * buffer size; src_idx >= 0 names a slot whose buffer is being
         * replaced (its ->skb is cleared once we commit). */
        src_map = NULL;
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
                desc = &tp->rx_std[dest_idx];
                map = &tp->rx_std_buffers[dest_idx];
                if (src_idx >= 0)
                        src_map = &tp->rx_std_buffers[src_idx];
                skb_size = tp->rx_pkt_buf_sz;
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
                desc = &tp->rx_jumbo[dest_idx];
                map = &tp->rx_jumbo_buffers[dest_idx];
                if (src_idx >= 0)
                        src_map = &tp->rx_jumbo_buffers[src_idx];
                skb_size = RX_JUMBO_PKT_BUF_SZ;
                break;

        default:
                return -EINVAL;
        }

        /* Do not overwrite any of the map or rp information
         * until we are sure we can commit to a new buffer.
         *
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
        skb = netdev_alloc_skb(tp->dev, skb_size);
        if (skb == NULL)
                return -ENOMEM;

        skb_reserve(skb, tp->rx_offset);

        /* Map only the usable part of the buffer (after rx_offset). */
        mapping = pci_map_single(tp->pdev, skb->data,
                                 skb_size - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
        /* NOTE(review): mapping is not checked with
         * pci_dma_mapping_error(); on mapping failure a bad address
         * would be handed to the chip — confirm/fix upstream. */

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        /* Publish the new DMA address in the ring descriptor; the rest
         * of the descriptor is invariant (see comment above). */
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);

        return skb_size;
}
4154
4155 /* We only need to move over in the address because the other
4156  * members of the RX descriptor are invariant.  See notes above
4157  * tg3_alloc_rx_skb for full details.
4158  */
4159 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4160                            int src_idx, u32 dest_idx_unmasked)
4161 {
4162         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4163         struct ring_info *src_map, *dest_map;
4164         int dest_idx;
4165
4166         switch (opaque_key) {
4167         case RXD_OPAQUE_RING_STD:
4168                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4169                 dest_desc = &tp->rx_std[dest_idx];
4170                 dest_map = &tp->rx_std_buffers[dest_idx];
4171                 src_desc = &tp->rx_std[src_idx];
4172                 src_map = &tp->rx_std_buffers[src_idx];
4173                 break;
4174
4175         case RXD_OPAQUE_RING_JUMBO:
4176                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4177                 dest_desc = &tp->rx_jumbo[dest_idx];
4178                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4179                 src_desc = &tp->rx_jumbo[src_idx];
4180                 src_map = &tp->rx_jumbo_buffers[src_idx];
4181                 break;
4182
4183         default:
4184                 return;
4185         }
4186
4187         dest_map->skb = src_map->skb;
4188         pci_unmap_addr_set(dest_map, mapping,
4189                            pci_unmap_addr(src_map, mapping));
4190         dest_desc->addr_hi = src_desc->addr_hi;
4191         dest_desc->addr_lo = src_desc->addr_lo;
4192
4193         src_map->skb = NULL;
4194 }
4195
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4202
4203 /* The RX ring scheme is composed of multiple rings which post fresh
4204  * buffers to the chip, and one special ring the chip uses to report
4205  * status back to the host.
4206  *
4207  * The special ring reports the status of received packets to the
4208  * host.  The chip does not write into the original descriptor the
4209  * RX buffer was obtained from.  The chip simply takes the original
4210  * descriptor as provided by the host, updates the status and length
4211  * field, then writes this into the next status ring entry.
4212  *
4213  * Each ring the host uses to post buffers to the chip is described
4214  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4215  * it is first placed into the on-chip ram.  When the packet's length
4216  * is known, it walks down the TG3_BDINFO entries to select the ring.
4217  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4218  * which is within the range of the new packet's length is chosen.
4219  *
4220  * The "separate ring for rx status" scheme may sound queer, but it makes
4221  * sense from a cache coherency perspective.  If only the host writes
4222  * to the buffer post rings, and only the chip writes to the rx status
4223  * rings, then cache lines never move beyond shared-modified state.
4224  * If both the host and chip were to write into the same ring, cache line
4225  * eviction could occur since both entities want it in an exclusive state.
4226  */
/* Service the RX return ring within the NAPI budget.  For each
 * completed descriptor: either allocate a fresh replacement buffer and
 * pass the original skb up the stack (packets larger than
 * RX_COPY_THRESHOLD), or copy the packet into a new small skb and
 * recycle the original buffer.  Finally acknowledge the return ring
 * and post refreshed producer indices to the chip.  Returns the
 * number of packets received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask, rx_std_posted = 0;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie tells us which posting ring (and
                 * which slot in it) this packet's buffer came from. */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        /* Unknown ring key: skip without reposting. */
                        goto next_pkt_nopost;
                }

                work_mask |= opaque_key;

                /* Drop errored frames (except the tolerated odd-nibble
                 * MII error), recycling the buffer in place. */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;

                if (len > RX_COPY_THRESHOLD
                        && tp->rx_offset == NET_IP_ALIGN
                        /* rx_offset will likely not equal NET_IP_ALIGN
                         * if this is a 5701 card running in PCI-X mode
                         * [see tg3_get_invariants()]
                         */
                ) {
                        int skb_size;

                        /* Replace the buffer first; if that fails, drop
                         * the packet and keep the old buffer. */
                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev,
                                                    len + TG3_RAW_IP_ALIGN);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
                        skb_put(copy_skb, len);
                        /* Sync for CPU, copy, then hand the buffer back
                         * to the device. */
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Honor the hardware checksum result when RX
                 * checksumming is enabled and the chip reports 0xffff
                 * for the TCP/UDP checksum field. */
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Flush the std producer index mid-loop so the chip is
                 * refilled before rx_std_max_post buffers accumulate. */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;

                        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
                                     TG3_64BIT_REG_LOW, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        mmiowb();

        return received;
}
4385
/* One pass of NAPI work: service link-change events, run the TX
 * completion path, then the RX path bounded by the remaining budget.
 * Returns the updated work_done count; returns early if tg3_tx()
 * flagged a pending TX recovery.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
        struct tg3_hw_status *sblk = tp->hw_status;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit while preserving
                         * SD_STATUS_UPDATED. */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                                /* phylib manages the PHY; just ack the
                                 * MAC status change bits. */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done += tg3_rx(tp, budget - work_done);

        return work_done;
}
4427
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted or no work remains, then completes NAPI and re-enables
 * interrupts via tg3_restart_ints().  On pending TX recovery it
 * completes NAPI and schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        int work_done = 0;
        struct tg3_hw_status *sblk = tp->hw_status;

        while (1) {
                work_done = tg3_poll_work(tp, work_done, budget);

                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                        /* tp->last_tag is used in tg3_restart_ints() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tp->last_tag = sblk->status_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tp))) {
                        netif_rx_complete(napi);
                        tg3_restart_ints(tp);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        netif_rx_complete(napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
4468
/* Mark the IRQ handler as quiesced (irq_sync) and wait for any handler
 * invocation already running on another CPU to finish.  Called from
 * tg3_full_lock() with tp->lock held; irq_sync must not already be set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make irq_sync visible before waiting for running handlers. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
4478
4479 static inline int tg3_irq_sync(struct tg3 *tp)
4480 {
4481         return tp->irq_sync;
4482 }
4483
4484 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4485  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4486  * with as well.  Most of the time, this is not necessary except when
4487  * shutting down the device.
4488  */
/* Acquire tp->lock (BH-safe); with irq_sync != 0, additionally quiesce
 * the IRQ handler so no interrupt work runs concurrently.  The lock is
 * taken before quiescing -- tg3_irq_quiesce() relies on that.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
4495
/* Release tp->lock; counterpart of tg3_full_lock().  Does not clear
 * tp->irq_sync -- NOTE(review): irq_sync is presumably cleared where
 * interrupts are re-enabled; confirm against the rest of the driver.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
4500
4501 /* One-shot MSI handler - Chip automatically disables interrupt
4502  * after sending MSI so driver doesn't have to do it.
4503  */
4504 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4505 {
4506         struct net_device *dev = dev_id;
4507         struct tg3 *tp = netdev_priv(dev);
4508
4509         prefetch(tp->hw_status);
4510         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4511
4512         if (likely(!tg3_irq_sync(tp)))
4513                 netif_rx_schedule(&tp->napi);
4514
4515         return IRQ_HANDLED;
4516 }
4517
4518 /* MSI ISR - No need to check for interrupt sharing and no need to
4519  * flush status block and interrupt mailbox. PCI ordering rules
4520  * guarantee that MSI will arrive after the status block.
4521  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        /* Skip scheduling NAPI while the handler is quiesced. */
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(&tp->napi);

        return IRQ_RETVAL(1);
}
4542
/* INTx ISR.  Verifies the interrupt is ours, masks further chip
 * interrupts via the interrupt mailbox, and schedules NAPI if there is
 * work; otherwise re-enables interrupts (shared-IRQ case).
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(&tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
4591
/* INTx ISR for chips using tagged status blocks.  An unchanged
 * status_tag (== tp->last_tag) means no new status was posted, i.e.
 * the interrupt may not be ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(&tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(&tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
4639
4640 /* ISR for interrupt test */
4641 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4642 {
4643         struct net_device *dev = dev_id;
4644         struct tg3 *tp = netdev_priv(dev);
4645         struct tg3_hw_status *sblk = tp->hw_status;
4646
4647         if ((sblk->status & SD_STATUS_UPDATED) ||
4648             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4649                 tg3_disable_ints(tp);
4650                 return IRQ_RETVAL(1);
4651         }
4652         return IRQ_RETVAL(0);
4653 }
4654
4655 static int tg3_init_hw(struct tg3 *, int);
4656 static int tg3_halt(struct tg3 *, int, int);
4657
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is shut down and closed; note that the
 * error path drops and reacquires tp->lock (dev_close must run without
 * it), hence the __releases/__acquires annotations.
 *
 * Returns 0 on success or the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Bring the hardware to a quiescent state before closing. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4681
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so the device can
 * be serviced with normal interrupt delivery unavailable (netconsole,
 * kgdb-over-ethernet, etc.).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4690
/* Process-context worker that fully resets and re-initializes the chip,
 * scheduled from tx-timeout and error paths via tp->reset_task.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been brought down between the
	 * schedule_work() and this handler running; nothing to do then.
	 */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Consume the one-shot "restart timer" request flag. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Swap the mailbox accessors while recovering from a tx
		 * hang (rx mailbox gets the flushing write variant) and
		 * clear the pending-recovery flag.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY outside the full lock, and only if the hardware
	 * came back up successfully.
	 */
	if (!err)
		tg3_phy_start(tp);
}
4738
/* Log a small set of MAC and DMA status registers to aid debugging of
 * transmit timeouts.  Read-only with respect to hardware state.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4746
4747 static void tg3_tx_timeout(struct net_device *dev)
4748 {
4749         struct tg3 *tp = netdev_priv(dev);
4750
4751         if (netif_msg_tx_err(tp)) {
4752                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4753                        dev->name);
4754                 tg3_dump_short_state(tp);
4755         }
4756
4757         schedule_work(&tp->reset_task);
4758 }
4759
4760 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4761 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4762 {
4763         u32 base = (u32) mapping & 0xffffffff;
4764
4765         return ((base > 0xffffdcc0) &&
4766                 (base + len + 8 < base));
4767 }
4768
4769 /* Test for DMA addresses > 40-bit */
4770 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4771                                           int len)
4772 {
4773 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4774         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4775                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4776         return 0;
4777 #else
4778         return 0;
4779 #endif
4780 }
4781
4782 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4783
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Replaces an already-queued skb (occupying ring entries *start up to,
 * but not including, last_plus_one) with a linear copy mapped at a
 * fresh DMA address, then fixes up the sw ring bookkeeping.  Returns 0
 * on success, -1 if the copy/mapping failed (packet is dropped).  The
 * original skb is always unmapped and freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* On 5701, add headroom so the copied data can sit at a
		 * 4-byte-aligned address.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Single descriptor: is_end set, mss in bits 31:1. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  The first entry owns the
	 * replacement skb (NULL if it was dropped); the remaining entries
	 * the original multi-descriptor skb used are cleared.
	 */
	i = 0;
	while (entry != last_plus_one) {
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4846
4847 static void tg3_set_txd(struct tg3 *tp, int entry,
4848                         dma_addr_t mapping, int len, u32 flags,
4849                         u32 mss_and_is_end)
4850 {
4851         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4852         int is_end = (mss_and_is_end & 0x1);
4853         u32 mss = (mss_and_is_end >> 1);
4854         u32 vlan_tag = 0;
4855
4856         if (is_end)
4857                 flags |= TXD_FLAG_END;
4858         if (flags & TXD_FLAG_VLAN) {
4859                 vlan_tag = flags >> 16;
4860                 flags &= 0xffff;
4861         }
4862         vlan_tag |= (mss << TXD_MSS_SHIFT);
4863
4864         txd->addr_hi = ((u64) mapping >> 32);
4865         txd->addr_lo = ((u64) mapping & 0xffffffff);
4866         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4867         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4868 }
4869
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb (head plus page fragments), writes one tx descriptor per
 * piece, and kicks the hardware by updating the producer mailbox.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: headers are modified below, so they must be
		 * private to this skb.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Preset per-segment tot_len; checksum fields are
			 * zeroed for the hardware to fill in.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			/* Header length is folded into the upper bits of
			 * mss for the descriptor (see << 9 above too).
			 */
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Map head and fragments for DMA; on failure the packet is
	 * silently dropped.
	 */
	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];
			tp->tx_buffers[entry].skb = NULL;

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop the queue when a worst-case packet no longer fits;
		 * re-wake immediately if reclaim freed enough room.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4989
4990 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4991
4992 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4993  * TSO header is greater than 80 bytes.
4994  */
4995 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4996 {
4997         struct sk_buff *segs, *nskb;
4998
4999         /* Estimate the number of fragments in the worst case */
5000         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5001                 netif_stop_queue(tp->dev);
5002                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5003                         return NETDEV_TX_BUSY;
5004
5005                 netif_wake_queue(tp->dev);
5006         }
5007
5008         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5009         if (IS_ERR(segs))
5010                 goto tg3_tso_bug_end;
5011
5012         do {
5013                 nskb = segs;
5014                 segs = segs->next;
5015                 nskb->next = NULL;
5016                 tg3_start_xmit_dma_bug(nskb, tp->dev);
5017         } while (segs);
5018
5019 tg3_tso_bug_end:
5020         dev_kfree_skb(skb);
5021
5022         return NETDEV_TX_OK;
5023 }
5024
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Same structure as tg3_start_xmit(), plus: a GSO fallback for long TSO
 * headers, per-descriptor 4GB/40-bit boundary checks, and a repair pass
 * (tigon3_dma_hwbug_workaround) when any descriptor would hit a bug.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	int would_hit_hwbug;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers are modified below; they must be private. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers longer than 80 bytes trip a TSO bug on some
		 * chips; fall back to software GSO for those.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants a pseudo-header checksum seed. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths; the field and shift differ
		 * between HW-TSO/5705 and the other chips.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	would_hit_hwbug = 0;

	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];

			tp->tx_buffers[entry].skb = NULL;

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor this skb used and let the
		 * workaround replace the whole set with a safe linear copy.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
5202
5203 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5204                                int new_mtu)
5205 {
5206         dev->mtu = new_mtu;
5207
5208         if (new_mtu > ETH_DATA_LEN) {
5209                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5210                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5211                         ethtool_op_set_tso(dev, 0);
5212                 }
5213                 else
5214                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5215         } else {
5216                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5217                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5218                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5219         }
5220 }
5221
/* net_device change_mtu hook.  If the interface is up, the chip must be
 * halted and re-initialized because the ring/buffer configuration
 * depends on the MTU.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Full shutdown, reconfigure, then restart under the lock. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart happens outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
5260
5261 /* Free up pending packets in all rx/tx rings.
5262  *
5263  * The chip has been shut down and the driver detached from
5264  * the networking, so no interrupts or new tx packets will
5265  * end up in the driver.  tp->{tx,}lock is not held and we are not
5266  * in an interrupt context and thus may sleep.
5267  */
5268 static void tg3_free_rings(struct tg3 *tp)
5269 {
5270         struct ring_info *rxp;
5271         int i;
5272
5273         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5274                 rxp = &tp->rx_std_buffers[i];
5275
5276                 if (rxp->skb == NULL)
5277                         continue;
5278                 pci_unmap_single(tp->pdev,
5279                                  pci_unmap_addr(rxp, mapping),
5280                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5281                                  PCI_DMA_FROMDEVICE);
5282                 dev_kfree_skb_any(rxp->skb);
5283                 rxp->skb = NULL;
5284         }
5285
5286         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5287                 rxp = &tp->rx_jumbo_buffers[i];
5288
5289                 if (rxp->skb == NULL)
5290                         continue;
5291                 pci_unmap_single(tp->pdev,
5292                                  pci_unmap_addr(rxp, mapping),
5293                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5294                                  PCI_DMA_FROMDEVICE);
5295                 dev_kfree_skb_any(rxp->skb);
5296                 rxp->skb = NULL;
5297         }
5298
5299         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5300                 struct tx_ring_info *txp;
5301                 struct sk_buff *skb;
5302
5303                 txp = &tp->tx_buffers[i];
5304                 skb = txp->skb;
5305
5306                 if (skb == NULL) {
5307                         i++;
5308                         continue;
5309                 }
5310
5311                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5312
5313                 txp->skb = NULL;
5314
5315                 i += skb_shinfo(skb)->nr_frags + 1;
5316
5317                 dev_kfree_skb_any(skb);
5318         }
5319 }
5320
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when not even one rx buffer could be
 * allocated; a partially filled ring shrinks tp->rx{,_jumbo}_pending.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use the std ring with jumbo-sized buffers
	 * when the MTU calls for it.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque encodes ring identity + index so completions can
		 * be matched back to the posting slot.
		 */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
5410
5411 /*
5412  * Must not be invoked with interrupt sources disabled and
5413  * the hardware shutdown down.
5414  */
5415 static void tg3_free_consistent(struct tg3 *tp)
5416 {
5417         kfree(tp->rx_std_buffers);
5418         tp->rx_std_buffers = NULL;
5419         if (tp->rx_std) {
5420                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5421                                     tp->rx_std, tp->rx_std_mapping);
5422                 tp->rx_std = NULL;
5423         }
5424         if (tp->rx_jumbo) {
5425                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5426                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5427                 tp->rx_jumbo = NULL;
5428         }
5429         if (tp->rx_rcb) {
5430                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5431                                     tp->rx_rcb, tp->rx_rcb_mapping);
5432                 tp->rx_rcb = NULL;
5433         }
5434         if (tp->tx_ring) {
5435                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5436                         tp->tx_ring, tp->tx_desc_mapping);
5437                 tp->tx_ring = NULL;
5438         }
5439         if (tp->hw_status) {
5440                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5441                                     tp->hw_status, tp->status_mapping);
5442                 tp->hw_status = NULL;
5443         }
5444         if (tp->hw_stats) {
5445                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5446                                     tp->hw_stats, tp->stats_mapping);
5447                 tp->hw_stats = NULL;
5448         }
5449 }
5450
/*
 * Allocate everything tg3_free_consistent() frees: one kzalloc()
 * backing the standard-RX, jumbo-RX and TX buffer-tracking arrays,
 * the four DMA descriptor rings, the hardware status block and the
 * hardware statistics block.
 *
 * Returns 0 on success or -ENOMEM; on any failure all regions
 * allocated so far are released via tg3_free_consistent().
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* Single zeroed allocation covering all three ring-info arrays;
	 * it is carved into the three pointers just below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent() does not zero the memory; the chip and
	 * driver both read these blocks, so clear them before use.
	 */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
5512
/* Maximum number of 100us polls while waiting for a block to stop. */
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 once the enable bit reads back clear (or immediately for
 * blocks that cannot be disabled on 5705-class chips).  On timeout,
 * returns -ENODEV unless 'silent' is set, in which case the timeout
 * is neither logged nor treated as an error and 0 is returned.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the hardware to quiesce. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
5560
/* tp->lock is held. */
/*
 * Quiesce the NIC: disable interrupts, stop the MAC receive path,
 * stop each RX/TX/DMA/buffer-manager block in order, reset the FTQs
 * and clear the host status and statistics blocks.
 *
 * Returns 0 if everything stopped cleanly, otherwise a non-zero value
 * (the OR of the individual tg3_stop_block()/timeout results).  With
 * 'silent' set, individual block timeouts are not logged.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames at the MAC before tearing down the
	 * receive blocks behind it.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC transmit path has no tg3_stop_block() helper; poll the
	 * enable bit by hand, always logging a timeout here.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse a reset through all the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host-visible status/statistics blocks; they may be
	 * unallocated if we are aborting before tg3_alloc_consistent().
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5623
5624 /* tp->lock is held. */
5625 static int tg3_nvram_lock(struct tg3 *tp)
5626 {
5627         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5628                 int i;
5629
5630                 if (tp->nvram_lock_cnt == 0) {
5631                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5632                         for (i = 0; i < 8000; i++) {
5633                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5634                                         break;
5635                                 udelay(20);
5636                         }
5637                         if (i == 8000) {
5638                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5639                                 return -ENODEV;
5640                         }
5641                 }
5642                 tp->nvram_lock_cnt++;
5643         }
5644         return 0;
5645 }
5646
5647 /* tp->lock is held. */
5648 static void tg3_nvram_unlock(struct tg3 *tp)
5649 {
5650         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5651                 if (tp->nvram_lock_cnt > 0)
5652                         tp->nvram_lock_cnt--;
5653                 if (tp->nvram_lock_cnt == 0)
5654                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5655         }
5656 }
5657
5658 /* tp->lock is held. */
5659 static void tg3_enable_nvram_access(struct tg3 *tp)
5660 {
5661         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5662             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5663                 u32 nvaccess = tr32(NVRAM_ACCESS);
5664
5665                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5666         }
5667 }
5668
5669 /* tp->lock is held. */
5670 static void tg3_disable_nvram_access(struct tg3 *tp)
5671 {
5672         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5673             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5674                 u32 nvaccess = tr32(NVRAM_ACCESS);
5675
5676                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5677         }
5678 }
5679
/*
 * Post an event word to the APE management firmware and ring its
 * doorbell.  Silently returns if the APE shared-memory signature or
 * firmware-ready flag is absent, or if the APE memory lock cannot be
 * taken.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* APE shared memory must carry the magic signature. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event serviced: post ours (with PENDING set)
		 * while still holding the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		/* 'apedata' still holds the pre-write status: clear means
		 * our event was just written, so stop polling.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event was actually posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5715
5716 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5717 {
5718         u32 event;
5719         u32 apedata;
5720
5721         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5722                 return;
5723
5724         switch (kind) {
5725                 case RESET_KIND_INIT:
5726                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5727                                         APE_HOST_SEG_SIG_MAGIC);
5728                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5729                                         APE_HOST_SEG_LEN_MAGIC);
5730                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5731                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5732                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5733                                         APE_HOST_DRIVER_ID_MAGIC);
5734                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5735                                         APE_HOST_BEHAV_NO_PHYLOCK);
5736
5737                         event = APE_EVENT_STATUS_STATE_START;
5738                         break;
5739                 case RESET_KIND_SHUTDOWN:
5740                         /* With the interface we are currently using,
5741                          * APE does not track driver state.  Wiping
5742                          * out the HOST SEGMENT SIGNATURE forces
5743                          * the APE to assume OS absent status.
5744                          */
5745                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5746
5747                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5748                         break;
5749                 case RESET_KIND_SUSPEND:
5750                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5751                         break;
5752                 default:
5753                         return;
5754         }
5755
5756         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5757
5758         tg3_ape_send_event(tp, event);
5759 }
5760
5761 /* tp->lock is held. */
5762 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5763 {
5764         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5765                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5766
5767         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5768                 switch (kind) {
5769                 case RESET_KIND_INIT:
5770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5771                                       DRV_STATE_START);
5772                         break;
5773
5774                 case RESET_KIND_SHUTDOWN:
5775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5776                                       DRV_STATE_UNLOAD);
5777                         break;
5778
5779                 case RESET_KIND_SUSPEND:
5780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5781                                       DRV_STATE_SUSPEND);
5782                         break;
5783
5784                 default:
5785                         break;
5786                 }
5787         }
5788
5789         if (kind == RESET_KIND_INIT ||
5790             kind == RESET_KIND_SUSPEND)
5791                 tg3_ape_driver_state_change(tp, kind);
5792 }
5793
5794 /* tp->lock is held. */
5795 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5796 {
5797         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5798                 switch (kind) {
5799                 case RESET_KIND_INIT:
5800                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5801                                       DRV_STATE_START_DONE);
5802                         break;
5803
5804                 case RESET_KIND_SHUTDOWN:
5805                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5806                                       DRV_STATE_UNLOAD_DONE);
5807                         break;
5808
5809                 default:
5810                         break;
5811                 }
5812         }
5813
5814         if (kind == RESET_KIND_SHUTDOWN)
5815                 tg3_ape_driver_state_change(tp, kind);
5816 }
5817
5818 /* tp->lock is held. */
5819 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5820 {
5821         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5822                 switch (kind) {
5823                 case RESET_KIND_INIT:
5824                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5825                                       DRV_STATE_START);
5826                         break;
5827
5828                 case RESET_KIND_SHUTDOWN:
5829                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5830                                       DRV_STATE_UNLOAD);
5831                         break;
5832
5833                 case RESET_KIND_SUSPEND:
5834                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5835                                       DRV_STATE_SUSPEND);
5836                         break;
5837
5838                 default:
5839                         break;
5840                 }
5841         }
5842 }
5843
/*
 * Wait for the on-chip firmware to finish initializing after a reset.
 *
 * 5906 parts run firmware on the VCPU: poll VCPU_STATUS for up to
 * 20ms (200 * 100us) and return -ENODEV on timeout.
 *
 * All other parts: poll the firmware mailbox for up to 1 second
 * (100000 * 10us) waiting for the complemented boot magic.  A timeout
 * here is NOT an error — some Sun onboard parts ship without
 * firmware — so it is logged once (gated by the NO_FWARE_REPORTED
 * flag) and 0 is returned.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5882
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The core-clock reset can clear PCI_COMMAND bits (e.g. memory
	 * enable); tg3_restore_pci_state() writes this value back.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5888
/* Restore PCI state after chip reset.
 *
 * Re-enables indirect register access, restores the saved PCI command
 * word, cache line size / latency timer (or PCIe read-request size),
 * clears PCI-X relaxed ordering, and re-enables MSI on 5780-class
 * chips whose reset clears the MSI enable bit.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
			pcie_set_readrq(tp->pdev, 4096);
		else {
			/* Conventional PCI: the reset may have cleared
			 * these config bytes.
			 */
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5952
5953 static void tg3_stop_fw(struct tg3 *);
5954
/* tp->lock is held. */
/*
 * Issue a full core-clock reset of the chip and bring it back to a
 * known state: save/restore PCI config state around the reset, fence
 * off the irq handler while PCI access may be broken, wait for the
 * firmware to come back up, and re-probe the ASF enable state from
 * NIC SRAM.  Returns 0 on success or a negative errno if the
 * firmware never signals boot completion.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c is an undocumented PCIe-bringup
		 * register; the 0x60 -> 0x20 write is a chip-specific
		 * workaround carried over from vendor code.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and un-halt it. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Set PCIE max payload size to 128 bytes and
		 * clear the "no snoop" and "relaxed ordering" bits.
		 */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      0);

		pcie_set_readrq(tp->pdev, 4096);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	/* PCI access is safe again; let the irq handler back in. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-establish the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Keep only the APE-owned enables; re-add TDE if the APE
		 * was using the transmit path.
		 */
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
6169
/* tp->lock is held. */
/*
 * Ask the ASF firmware running on the RX CPU to pause, using the
 * mailbox + event-ack handshake.  Only done when ASF is enabled and
 * the APE is NOT managing the firmware.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
6186
/* tp->lock is held. */
/*
 * Fully halt and reset the chip: pause the ASF firmware, announce the
 * reset kind to firmware/APE, quiesce all hardware blocks, issue the
 * core-clock reset, then send the post-reset signatures.
 *
 * 'kind' is one of the RESET_KIND_* values; 'silent' suppresses
 * block-stop timeout logging in tg3_abort_hw().
 *
 * Returns 0 on success or the negative errno from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Post-reset signatures are written even if the reset failed,
	 * so firmware state stays consistent; the error is still
	 * propagated to the caller.
	 */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
6207
/* Layout of the embedded firmware image that follows (tg3FwText etc.):
 * text, rodata, data, sbss and bss segments, all based at
 * TG3_FW_START_ADDR in NIC SRAM.
 *
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR looks like a typo for
 * "RELEASE", but renaming the macro could break other users of it
 * elsewhere in the file; left as-is.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
6222
/* Text segment of the 5701 A0 workaround firmware, written into the
 * RX/TX CPU scratch memory by tg3_load_5701_a0_firmware_fix().
 * Opaque binary image (appears to be big-endian MIPS machine code --
 * see the firmware notice at the top of this file); do not hand-edit.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6316
/* Read-only data segment of the 5701 A0 workaround firmware.  The
 * words look like packed ASCII tags ("5701Aslr", "SwEvent0", ...) --
 * presumably log/label strings used by the firmware; opaque to the
 * driver, do not hand-edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
6324
#if 0 /* All zeros, don't eat up space with it. */
/* Data segment image.  tg3_load_5701_a0_firmware_fix() passes NULL for
 * data_data instead, and the loader then zero-fills the section.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
6331
/* On-chip scratch memory windows into which the RX and TX CPU firmware
 * images are written (see tg3_load_firmware_cpu()).  16 KiB each.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
6336
6337 /* tp->lock is held. */
6338 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6339 {
6340         int i;
6341
6342         BUG_ON(offset == TX_CPU_BASE &&
6343             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6344
6345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6346                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6347
6348                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6349                 return 0;
6350         }
6351         if (offset == RX_CPU_BASE) {
6352                 for (i = 0; i < 10000; i++) {
6353                         tw32(offset + CPU_STATE, 0xffffffff);
6354                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6355                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6356                                 break;
6357                 }
6358
6359                 tw32(offset + CPU_STATE, 0xffffffff);
6360                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6361                 udelay(10);
6362         } else {
6363                 for (i = 0; i < 10000; i++) {
6364                         tw32(offset + CPU_STATE, 0xffffffff);
6365                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6366                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6367                                 break;
6368                 }
6369         }
6370
6371         if (i >= 10000) {
6372                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6373                        "and %s CPU\n",
6374                        tp->dev->name,
6375                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6376                 return -ENODEV;
6377         }
6378
6379         /* Clear firmware's nvram arbitration. */
6380         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6381                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6382         return 0;
6383 }
6384
/* Describes a firmware image as three sections (text, rodata, data).
 * Bases are addresses in the firmware CPU's address space, lengths are
 * in bytes, and a NULL *_data pointer makes tg3_load_firmware_cpu()
 * zero-fill that section instead of copying.
 */
struct fw_info {
	unsigned int text_base;		/* load address of the text section */
	unsigned int text_len;		/* text length in bytes */
	const u32 *text_data;		/* text words, or NULL to zero-fill */
	unsigned int rodata_base;	/* load address of the rodata section */
	unsigned int rodata_len;	/* rodata length in bytes */
	const u32 *rodata_data;		/* rodata words, or NULL to zero-fill */
	unsigned int data_base;		/* load address of the data section */
	unsigned int data_len;		/* data length in bytes */
	const u32 *data_data;		/* data words, or NULL to zero-fill */
};
6396
6397 /* tp->lock is held. */
6398 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6399                                  int cpu_scratch_size, struct fw_info *info)
6400 {
6401         int err, lock_err, i;
6402         void (*write_op)(struct tg3 *, u32, u32);
6403
6404         if (cpu_base == TX_CPU_BASE &&
6405             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6406                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6407                        "TX cpu firmware on %s which is 5705.\n",
6408                        tp->dev->name);
6409                 return -EINVAL;
6410         }
6411
6412         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6413                 write_op = tg3_write_mem;
6414         else
6415                 write_op = tg3_write_indirect_reg32;
6416
6417         /* It is possible that bootcode is still loading at this point.
6418          * Get the nvram lock first before halting the cpu.
6419          */
6420         lock_err = tg3_nvram_lock(tp);
6421         err = tg3_halt_cpu(tp, cpu_base);
6422         if (!lock_err)
6423                 tg3_nvram_unlock(tp);
6424         if (err)
6425                 goto out;
6426
6427         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6428                 write_op(tp, cpu_scratch_base + i, 0);
6429         tw32(cpu_base + CPU_STATE, 0xffffffff);
6430         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6431         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6432                 write_op(tp, (cpu_scratch_base +
6433                               (info->text_base & 0xffff) +
6434                               (i * sizeof(u32))),
6435                          (info->text_data ?
6436                           info->text_data[i] : 0));
6437         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6438                 write_op(tp, (cpu_scratch_base +
6439                               (info->rodata_base & 0xffff) +
6440                               (i * sizeof(u32))),
6441                          (info->rodata_data ?
6442                           info->rodata_data[i] : 0));
6443         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6444                 write_op(tp, (cpu_scratch_base +
6445                               (info->data_base & 0xffff) +
6446                               (i * sizeof(u32))),
6447                          (info->data_data ?
6448                           info->data_data[i] : 0));
6449
6450         err = 0;
6451
6452 out:
6453         return err;
6454 }
6455
6456 /* tp->lock is held. */
6457 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6458 {
6459         struct fw_info info;
6460         int err, i;
6461
6462         info.text_base = TG3_FW_TEXT_ADDR;
6463         info.text_len = TG3_FW_TEXT_LEN;
6464         info.text_data = &tg3FwText[0];
6465         info.rodata_base = TG3_FW_RODATA_ADDR;
6466         info.rodata_len = TG3_FW_RODATA_LEN;
6467         info.rodata_data = &tg3FwRodata[0];
6468         info.data_base = TG3_FW_DATA_ADDR;
6469         info.data_len = TG3_FW_DATA_LEN;
6470         info.data_data = NULL;
6471
6472         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6473                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6474                                     &info);
6475         if (err)
6476                 return err;
6477
6478         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6479                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6480                                     &info);
6481         if (err)
6482                 return err;
6483
6484         /* Now startup only the RX cpu. */
6485         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6486         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6487
6488         for (i = 0; i < 5; i++) {
6489                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6490                         break;
6491                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6492                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6493                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6494                 udelay(1000);
6495         }
6496         if (i >= 5) {
6497                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6498                        "to set RX CPU PC, is %08x should be %08x\n",
6499                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6500                        TG3_FW_TEXT_ADDR);
6501                 return -ENODEV;
6502         }
6503         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6504         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6505
6506         return 0;
6507 }
6508
6509
/* Layout of the TSO firmware image (the tg3TsoFwText/... arrays that
 * follow).  Addresses are in the firmware CPU's address space; lengths
 * are in bytes.
 * NOTE(review): "RELASE" in the minor-version macro is a typo for
 * "RELEASE"; the name is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
6524
6525 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6526         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6527         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6528         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6529         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6530         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6531         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6532         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6533         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6534         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6535         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6536         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6537         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6538         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6539         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6540         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6541         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6542         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6543         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6544         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6545         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6546         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6547         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6548         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6549         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6550         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6551         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6552         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6553         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6554         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6555         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6556         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6557         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6558         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6559         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6560         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6561         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6562         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6563         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6564         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6565         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6566         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6567         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6568         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6569         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6570         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6571         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6572         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6573         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6574         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6575         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6576         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6577         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6578         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6579         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6580         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6581         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6582         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6583         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6584         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6585         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6586         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6587         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6588         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6589         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6590         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6591         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6592         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6593         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6594         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6595         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6596         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6597         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6598         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6599         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6600         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6601         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6602         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6603         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6604         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6605         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6606         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6607         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6608         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6609         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6610         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6611         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6612         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6613         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6614         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6615         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6616         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6617         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6618         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6619         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6620         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6621         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6622         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6623         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6624         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6625         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6626         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6627         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6628         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6629         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6630         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6631         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6632         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6633         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6634         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6635         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6636         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6637         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6638         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6639         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6640         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6641         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6642         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6643         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6644         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6645         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6646         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6647         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6648         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6649         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6650         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6651         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6652         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6653         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6654         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6655         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6656         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6657         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6658         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6659         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6660         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6661         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6662         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6663         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6664         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6665         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6666         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6667         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6668         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6669         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6670         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6671         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6672         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6673         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6674         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6675         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6676         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6677         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6678         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6679         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6680         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6681         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6682         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6683         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6684         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6685         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6686         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6687         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6688         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6689         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6690         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6691         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6692         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6693         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6694         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6695         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6696         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6697         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6698         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6699         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6700         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6701         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6702         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6703         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6704         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6705         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6706         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6707         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6708         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6709         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6710         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6711         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6712         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6713         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6714         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6715         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6716         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6717         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6718         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6719         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6720         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6721         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6722         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6723         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6724         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6725         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6726         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6727         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6728         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6729         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6730         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6731         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6732         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6733         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6734         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6735         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6736         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6737         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6738         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6739         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6740         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6741         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6742         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6743         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6744         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6745         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6746         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6747         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6748         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6749         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6750         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6751         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6752         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6753         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6754         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6755         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6756         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6757         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6758         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6759         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6760         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6761         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6762         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6763         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6764         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6765         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6766         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6767         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6768         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6769         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6770         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6771         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6772         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6773         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6774         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6775         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6776         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6777         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6778         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6779         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6780         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6781         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6782         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6783         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6784         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6785         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6786         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6787         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6788         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6789         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6790         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6791         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6792         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6793         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6794         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6795         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6796         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6797         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6798         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6799         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6800         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6801         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6802         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6803         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6804         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6805         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6806         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6807         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6808         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6809         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6810 };
6811
/* Read-only data segment of the standard TSO firmware image.  The words
 * are big-endian ASCII debug/event strings (e.g. "MainCpuB", "stkoffldIn",
 * "SwEvent0", "fatalErr") used by the firmware itself; do not edit.
 */
6812 static const u32 tg3TsoFwRodata[] = {
6813         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6814         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6815         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6816         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6817         0x00000000,
6818 };
6819
/* Initialized data segment of the standard TSO firmware image.  Contains
 * the ASCII version tag "stkoffld_v1.6.0"; do not edit.
 */
6820 static const u32 tg3TsoFwData[] = {
6821         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6822         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6823         0x00000000,
6824 };
6825
6826 /* 5705 needs a special version of the TSO firmware.  */
/*
 * Memory map of the 5705 TSO firmware image in NIC SRAM: text at
 * 0x00010000, then rodata/data/sbss/bss packed behind it (with alignment
 * padding).  The LEN values must match the sizes of the tg3Tso5Fw* arrays
 * below.
 *
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is a historical
 * misspelling; keep the name as-is since other code in this file may
 * reference it by this exact spelling.
 */
6827 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6828 #define TG3_TSO5_FW_RELASE_MINOR        0x2
6829 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6830 #define TG3_TSO5_FW_START_ADDR          0x00010000
6831 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6832 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6833 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6834 #define TG3_TSO5_FW_RODATA_LEN          0x50
6835 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6836 #define TG3_TSO5_FW_DATA_LEN            0x20
6837 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6838 #define TG3_TSO5_FW_SBSS_LEN            0x28
6839 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6840 #define TG3_TSO5_FW_BSS_LEN             0x88
6841
/* Text (instruction) segment of the 5705 TSO firmware image, downloaded to
 * the chip's RX CPU by tg3_load_tso_firmware().  This is opaque MIPS
 * machine code derived from Broadcom's proprietary source (see the file
 * header); every word is significant — do not edit by hand.
 */
6842 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6843         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6844         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6845         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6846         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6847         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6848         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6849         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6850         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6851         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6852         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6853         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6854         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6855         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6856         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6857         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6858         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6859         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6860         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6861         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6862         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6863         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6864         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6865         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6866         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6867         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6868         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6869         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6870         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6871         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6872         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6873         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6874         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6875         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6876         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6877         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6878         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6879         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6880         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6881         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6882         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6883         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6884         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6885         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6886         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6887         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6888         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6889         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6890         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6891         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6892         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6893         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6894         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6895         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6896         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6897         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6898         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6899         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6900         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6901         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6902         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6903         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6904         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6905         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6906         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6907         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6908         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6909         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6910         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6911         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6912         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6913         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6914         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6915         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6916         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6917         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6918         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6919         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6920         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6921         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6922         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6923         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6924         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6925         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6926         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6927         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6928         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6929         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6930         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6931         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6932         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6933         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6934         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6935         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6936         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6937         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6938         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6939         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6940         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6941         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6942         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6943         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6944         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6945         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6946         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6947         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6948         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6949         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6950         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6951         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6952         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6953         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6954         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6955         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6956         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6957         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6958         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6959         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6960         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6961         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6962         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6963         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6964         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6965         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6966         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6967         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6968         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6969         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6970         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6971         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6972         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6973         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6974         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6975         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6976         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6977         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6978         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6979         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6980         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6981         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6982         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6983         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6984         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6985         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6986         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6987         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6988         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6989         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6990         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6991         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6992         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6993         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6994         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6995         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6996         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6997         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6998         0x00000000, 0x00000000, 0x00000000,
6999 };
7000
/* Read-only data segment of the 5705 TSO firmware image.  Big-endian ASCII
 * debug strings ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr"); do not
 * edit.
 */
7001 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
7002         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
7003         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
7004         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
7005         0x00000000, 0x00000000, 0x00000000,
7006 };
7007
/* Initialized data segment of the 5705 TSO firmware image.  Contains the
 * ASCII version tag "stkoffld_v1.2.0"; do not edit.
 */
7008 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
7009         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
7010         0x00000000, 0x00000000, 0x00000000,
7011 };
7012
7013 /* tp->lock is held. */
7014 static int tg3_load_tso_firmware(struct tg3 *tp)
7015 {
7016         struct fw_info info;
7017         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7018         int err, i;
7019
7020         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7021                 return 0;
7022
7023         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7024                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
7025                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
7026                 info.text_data = &tg3Tso5FwText[0];
7027                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
7028                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
7029                 info.rodata_data = &tg3Tso5FwRodata[0];
7030                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
7031                 info.data_len = TG3_TSO5_FW_DATA_LEN;
7032                 info.data_data = &tg3Tso5FwData[0];
7033                 cpu_base = RX_CPU_BASE;
7034                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7035                 cpu_scratch_size = (info.text_len +
7036                                     info.rodata_len +
7037                                     info.data_len +
7038                                     TG3_TSO5_FW_SBSS_LEN +
7039                                     TG3_TSO5_FW_BSS_LEN);
7040         } else {
7041                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
7042                 info.text_len = TG3_TSO_FW_TEXT_LEN;
7043                 info.text_data = &tg3TsoFwText[0];
7044                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
7045                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
7046                 info.rodata_data = &tg3TsoFwRodata[0];
7047                 info.data_base = TG3_TSO_FW_DATA_ADDR;
7048                 info.data_len = TG3_TSO_FW_DATA_LEN;
7049                 info.data_data = &tg3TsoFwData[0];
7050                 cpu_base = TX_CPU_BASE;
7051                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7052                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7053         }
7054
7055         err = tg3_load_firmware_cpu(tp, cpu_base,
7056                                     cpu_scratch_base, cpu_scratch_size,
7057                                     &info);
7058         if (err)
7059                 return err;
7060
7061         /* Now startup the cpu. */
7062         tw32(cpu_base + CPU_STATE, 0xffffffff);
7063         tw32_f(cpu_base + CPU_PC,    info.text_base);
7064
7065         for (i = 0; i < 5; i++) {
7066                 if (tr32(cpu_base + CPU_PC) == info.text_base)
7067                         break;
7068                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7069                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7070                 tw32_f(cpu_base + CPU_PC,    info.text_base);
7071                 udelay(1000);
7072         }
7073         if (i >= 5) {
7074                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7075                        "to set CPU PC, is %08x should be %08x\n",
7076                        tp->dev->name, tr32(cpu_base + CPU_PC),
7077                        info.text_base);
7078                 return -ENODEV;
7079         }
7080         tw32(cpu_base + CPU_STATE, 0xffffffff);
7081         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7082         return 0;
7083 }
7084
7085
7086 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7087 {
7088         struct tg3 *tp = netdev_priv(dev);
7089         struct sockaddr *addr = p;
7090         int err = 0, skip_mac_1 = 0;
7091
7092         if (!is_valid_ether_addr(addr->sa_data))
7093                 return -EINVAL;
7094
7095         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7096
7097         if (!netif_running(dev))
7098                 return 0;
7099
7100         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7101                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7102
7103                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7104                 addr0_low = tr32(MAC_ADDR_0_LOW);
7105                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7106                 addr1_low = tr32(MAC_ADDR_1_LOW);
7107
7108                 /* Skip MAC addr 1 if ASF is using it. */
7109                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7110                     !(addr1_high == 0 && addr1_low == 0))
7111                         skip_mac_1 = 1;
7112         }
7113         spin_lock_bh(&tp->lock);
7114         __tg3_set_mac_addr(tp, skip_mac_1);
7115         spin_unlock_bh(&tp->lock);
7116
7117         return err;
7118 }
7119
7120 /* tp->lock is held. */
7121 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7122                            dma_addr_t mapping, u32 maxlen_flags,
7123                            u32 nic_addr)
7124 {
7125         tg3_write_mem(tp,
7126                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7127                       ((u64) mapping >> 32));
7128         tg3_write_mem(tp,
7129                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7130                       ((u64) mapping & 0xffffffff));
7131         tg3_write_mem(tp,
7132                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7133                        maxlen_flags);
7134
7135         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7136                 tg3_write_mem(tp,
7137                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7138                               nic_addr);
7139 }
7140
7141 static void __tg3_set_rx_mode(struct net_device *);
7142 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7143 {
7144         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7145         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7146         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7147         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7148         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7149                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7150                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7151         }
7152         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7153         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7154         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7155                 u32 val = ec->stats_block_coalesce_usecs;
7156
7157                 if (!netif_carrier_ok(tp->dev))
7158                         val = 0;
7159
7160                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7161         }
7162 }
7163
7164 /* tp->lock is held. */
7165 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7166 {
7167         u32 val, rdmac_mode;
7168         int i, err, limit;
7169
7170         tg3_disable_ints(tp);
7171
7172         tg3_stop_fw(tp);
7173
7174         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7175
7176         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7177                 tg3_abort_hw(tp, 1);
7178         }
7179
7180         if (reset_phy &&
7181             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7182                 tg3_phy_reset(tp);
7183
7184         err = tg3_chip_reset(tp);
7185         if (err)
7186                 return err;
7187
7188         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7189
7190         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7191                 val = tr32(TG3_CPMU_CTRL);
7192                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7193                 tw32(TG3_CPMU_CTRL, val);
7194
7195                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7196                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7197                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7198                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7199
7200                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7201                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7202                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7203                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7204
7205                 val = tr32(TG3_CPMU_HST_ACC);
7206                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7207                 val |= CPMU_HST_ACC_MACCLK_6_25;
7208                 tw32(TG3_CPMU_HST_ACC, val);
7209         }
7210
7211         /* This works around an issue with Athlon chipsets on
7212          * B3 tigon3 silicon.  This bit has no effect on any
7213          * other revision.  But do not set this on PCI Express
7214          * chips and don't even touch the clocks if the CPMU is present.
7215          */
7216         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7217                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7218                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7219                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7220         }
7221
7222         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7223             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7224                 val = tr32(TG3PCI_PCISTATE);
7225                 val |= PCISTATE_RETRY_SAME_DMA;
7226                 tw32(TG3PCI_PCISTATE, val);
7227         }
7228
7229         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7230                 /* Allow reads and writes to the
7231                  * APE register and memory space.
7232                  */
7233                 val = tr32(TG3PCI_PCISTATE);
7234                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7235                        PCISTATE_ALLOW_APE_SHMEM_WR;
7236                 tw32(TG3PCI_PCISTATE, val);
7237         }
7238
7239         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7240                 /* Enable some hw fixes.  */
7241                 val = tr32(TG3PCI_MSI_DATA);
7242                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7243                 tw32(TG3PCI_MSI_DATA, val);
7244         }
7245
7246         /* Descriptor ring init may make accesses to the
7247          * NIC SRAM area to setup the TX descriptors, so we
7248          * can only do this after the hardware has been
7249          * successfully reset.
7250          */
7251         err = tg3_init_rings(tp);
7252         if (err)
7253                 return err;
7254
7255         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7256             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7257                 /* This value is determined during the probe time DMA
7258                  * engine test, tg3_test_dma.
7259                  */
7260                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7261         }
7262
7263         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7264                           GRC_MODE_4X_NIC_SEND_RINGS |
7265                           GRC_MODE_NO_TX_PHDR_CSUM |
7266                           GRC_MODE_NO_RX_PHDR_CSUM);
7267         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7268
7269         /* Pseudo-header checksum is done by hardware logic and not
7270          * the offload processers, so make the chip do the pseudo-
7271          * header checksums on receive.  For transmit it is more
7272          * convenient to do the pseudo-header checksum in software
7273          * as Linux does that on transmit for us in all cases.
7274          */
7275         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7276
7277         tw32(GRC_MODE,
7278              tp->grc_mode |
7279              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7280
7281         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7282         val = tr32(GRC_MISC_CFG);
7283         val &= ~0xff;
7284         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7285         tw32(GRC_MISC_CFG, val);
7286
7287         /* Initialize MBUF/DESC pool. */
7288         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7289                 /* Do nothing.  */
7290         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7291                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7292                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7293                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7294                 else
7295                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7296                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7297                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7298         }
7299         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7300                 int fw_len;
7301
7302                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7303                           TG3_TSO5_FW_RODATA_LEN +
7304                           TG3_TSO5_FW_DATA_LEN +
7305                           TG3_TSO5_FW_SBSS_LEN +
7306                           TG3_TSO5_FW_BSS_LEN);
7307                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7308                 tw32(BUFMGR_MB_POOL_ADDR,
7309                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7310                 tw32(BUFMGR_MB_POOL_SIZE,
7311                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7312         }
7313
7314         if (tp->dev->mtu <= ETH_DATA_LEN) {
7315                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7316                      tp->bufmgr_config.mbuf_read_dma_low_water);
7317                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7318                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7319                 tw32(BUFMGR_MB_HIGH_WATER,
7320                      tp->bufmgr_config.mbuf_high_water);
7321         } else {
7322                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7323                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7324                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7325                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7326                 tw32(BUFMGR_MB_HIGH_WATER,
7327                      tp->bufmgr_config.mbuf_high_water_jumbo);
7328         }
7329         tw32(BUFMGR_DMA_LOW_WATER,
7330              tp->bufmgr_config.dma_low_water);
7331         tw32(BUFMGR_DMA_HIGH_WATER,
7332              tp->bufmgr_config.dma_high_water);
7333
7334         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7335         for (i = 0; i < 2000; i++) {
7336                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7337                         break;
7338                 udelay(10);
7339         }
7340         if (i >= 2000) {
7341                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7342                        tp->dev->name);
7343                 return -ENODEV;
7344         }
7345
7346         /* Setup replenish threshold. */
7347         val = tp->rx_pending / 8;
7348         if (val == 0)
7349                 val = 1;
7350         else if (val > tp->rx_std_max_post)
7351                 val = tp->rx_std_max_post;
7352         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7353                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7354                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7355
7356                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7357                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7358         }
7359
7360         tw32(RCVBDI_STD_THRESH, val);
7361
7362         /* Initialize TG3_BDINFO's at:
7363          *  RCVDBDI_STD_BD:     standard eth size rx ring
7364          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7365          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7366          *
7367          * like so:
7368          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7369          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7370          *                              ring attribute flags
7371          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7372          *
7373          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7374          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7375          *
7376          * The size of each ring is fixed in the firmware, but the location is
7377          * configurable.
7378          */
7379         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7380              ((u64) tp->rx_std_mapping >> 32));
7381         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7382              ((u64) tp->rx_std_mapping & 0xffffffff));
7383         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7384              NIC_SRAM_RX_BUFFER_DESC);
7385
7386         /* Don't even try to program the JUMBO/MINI buffer descriptor
7387          * configs on 5705.
7388          */
7389         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7390                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7391                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7392         } else {
7393                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7394                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7395
7396                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7397                      BDINFO_FLAGS_DISABLED);
7398
7399                 /* Setup replenish threshold. */
7400                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7401
7402                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7403                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7404                              ((u64) tp->rx_jumbo_mapping >> 32));
7405                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7406                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7407                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7408                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7409                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7410                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7411                 } else {
7412                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7413                              BDINFO_FLAGS_DISABLED);
7414                 }
7415
7416         }
7417
7418         /* There is only one send ring on 5705/5750, no need to explicitly
7419          * disable the others.
7420          */
7421         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7422                 /* Clear out send RCB ring in SRAM. */
7423                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7424                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7425                                       BDINFO_FLAGS_DISABLED);
7426         }
7427
7428         tp->tx_prod = 0;
7429         tp->tx_cons = 0;
7430         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7431         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7432
7433         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7434                        tp->tx_desc_mapping,
7435                        (TG3_TX_RING_SIZE <<
7436                         BDINFO_FLAGS_MAXLEN_SHIFT),
7437                        NIC_SRAM_TX_BUFFER_DESC);
7438
7439         /* There is only one receive return ring on 5705/5750, no need
7440          * to explicitly disable the others.
7441          */
7442         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7443                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7444                      i += TG3_BDINFO_SIZE) {
7445                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7446                                       BDINFO_FLAGS_DISABLED);
7447                 }
7448         }
7449
7450         tp->rx_rcb_ptr = 0;
7451         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7452
7453         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7454                        tp->rx_rcb_mapping,
7455                        (TG3_RX_RCB_RING_SIZE(tp) <<
7456                         BDINFO_FLAGS_MAXLEN_SHIFT),
7457                        0);
7458
7459         tp->rx_std_ptr = tp->rx_pending;
7460         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7461                      tp->rx_std_ptr);
7462
7463         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7464                                                 tp->rx_jumbo_pending : 0;
7465         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7466                      tp->rx_jumbo_ptr);
7467
7468         /* Initialize MAC address and backoff seed. */
7469         __tg3_set_mac_addr(tp, 0);
7470
7471         /* MTU + ethernet header + FCS + optional VLAN tag */
7472         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7473
7474         /* The slot time is changed by tg3_setup_phy if we
7475          * run at gigabit with half duplex.
7476          */
7477         tw32(MAC_TX_LENGTHS,
7478              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7479              (6 << TX_LENGTHS_IPG_SHIFT) |
7480              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7481
7482         /* Receive rules. */
7483         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7484         tw32(RCVLPC_CONFIG, 0x0181);
7485
7486         /* Calculate RDMAC_MODE setting early, we need it to determine
7487          * the RCVLPC_STATE_ENABLE mask.
7488          */
7489         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7490                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7491                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7492                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7493                       RDMAC_MODE_LNGREAD_ENAB);
7494
7495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7498                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7499                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7500                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7501
7502         /* If statement applies to 5705 and 5750 PCI devices only */
7503         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7504              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7505             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7506                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7507                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7508                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7509                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7510                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7511                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7512                 }
7513         }
7514
7515         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7516                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7517
7518         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7519                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7520
7521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7523                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7524
7525         /* Receive/send statistics. */
7526         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7527                 val = tr32(RCVLPC_STATS_ENABLE);
7528                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7529                 tw32(RCVLPC_STATS_ENABLE, val);
7530         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7531                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7532                 val = tr32(RCVLPC_STATS_ENABLE);
7533                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7534                 tw32(RCVLPC_STATS_ENABLE, val);
7535         } else {
7536                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7537         }
7538         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7539         tw32(SNDDATAI_STATSENAB, 0xffffff);
7540         tw32(SNDDATAI_STATSCTRL,
7541              (SNDDATAI_SCTRL_ENABLE |
7542               SNDDATAI_SCTRL_FASTUPD));
7543
7544         /* Setup host coalescing engine. */
7545         tw32(HOSTCC_MODE, 0);
7546         for (i = 0; i < 2000; i++) {
7547                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7548                         break;
7549                 udelay(10);
7550         }
7551
7552         __tg3_set_coalesce(tp, &tp->coal);
7553
7554         /* set status block DMA address */
7555         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7556              ((u64) tp->status_mapping >> 32));
7557         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7558              ((u64) tp->status_mapping & 0xffffffff));
7559
7560         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7561                 /* Status/statistics block address.  See tg3_timer,
7562                  * the tg3_periodic_fetch_stats call there, and
7563                  * tg3_get_stats to see how this works for 5705/5750 chips.
7564                  */
7565                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7566                      ((u64) tp->stats_mapping >> 32));
7567                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7568                      ((u64) tp->stats_mapping & 0xffffffff));
7569                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7570                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7571         }
7572
7573         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7574
7575         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7576         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7577         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7578                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7579
7580         /* Clear statistics/status block in chip, and status block in ram. */
7581         for (i = NIC_SRAM_STATS_BLK;
7582              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7583              i += sizeof(u32)) {
7584                 tg3_write_mem(tp, i, 0);
7585                 udelay(40);
7586         }
7587         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7588
7589         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7590                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7591                 /* reset to prevent losing 1st rx packet intermittently */
7592                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7593                 udelay(10);
7594         }
7595
7596         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7597                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7598         else
7599                 tp->mac_mode = 0;
7600         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7601                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7602         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7603             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7604             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7605                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7606         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7607         udelay(40);
7608
7609         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7610          * If TG3_FLG2_IS_NIC is zero, we should read the
7611          * register to preserve the GPIO settings for LOMs. The GPIOs,
7612          * whether used as inputs or outputs, are set by boot code after
7613          * reset.
7614          */
7615         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7616                 u32 gpio_mask;
7617
7618                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7619                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7620                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7621
7622                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7623                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7624                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7625
7626                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7627                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7628
7629                 tp->grc_local_ctrl &= ~gpio_mask;
7630                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7631
7632                 /* GPIO1 must be driven high for eeprom write protect */
7633                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7634                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7635                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7636         }
7637         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7638         udelay(100);
7639
7640         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7641         tp->last_tag = 0;
7642
7643         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7644                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7645                 udelay(40);
7646         }
7647
7648         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7649                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7650                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7651                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7652                WDMAC_MODE_LNGREAD_ENAB);
7653
7654         /* If statement applies to 5705 and 5750 PCI devices only */
7655         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7656              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7657             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7658                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7659                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7660                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7661                         /* nothing */
7662                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7663                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7664                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7665                         val |= WDMAC_MODE_RX_ACCEL;
7666                 }
7667         }
7668
7669         /* Enable host coalescing bug fix */
7670         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7671                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7672
7673         tw32_f(WDMAC_MODE, val);
7674         udelay(40);
7675
7676         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7677                 u16 pcix_cmd;
7678
7679                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7680                                      &pcix_cmd);
7681                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7682                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7683                         pcix_cmd |= PCI_X_CMD_READ_2K;
7684                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7685                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7686                         pcix_cmd |= PCI_X_CMD_READ_2K;
7687                 }
7688                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7689                                       pcix_cmd);
7690         }
7691
7692         tw32_f(RDMAC_MODE, rdmac_mode);
7693         udelay(40);
7694
7695         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7696         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7697                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7698
7699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7700                 tw32(SNDDATAC_MODE,
7701                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7702         else
7703                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7704
7705         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7706         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7707         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7708         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7709         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7710                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7711         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7712         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7713
7714         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7715                 err = tg3_load_5701_a0_firmware_fix(tp);
7716                 if (err)
7717                         return err;
7718         }
7719
7720         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7721                 err = tg3_load_tso_firmware(tp);
7722                 if (err)
7723                         return err;
7724         }
7725
7726         tp->tx_mode = TX_MODE_ENABLE;
7727         tw32_f(MAC_TX_MODE, tp->tx_mode);
7728         udelay(100);
7729
7730         tp->rx_mode = RX_MODE_ENABLE;
7731         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7732                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7733
7734         tw32_f(MAC_RX_MODE, tp->rx_mode);
7735         udelay(10);
7736
7737         tw32(MAC_LED_CTRL, tp->led_ctrl);
7738
7739         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7740         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7741                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7742                 udelay(10);
7743         }
7744         tw32_f(MAC_RX_MODE, tp->rx_mode);
7745         udelay(10);
7746
7747         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7748                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7749                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7750                         /* Set drive transmission level to 1.2V  */
7751                         /* only if the signal pre-emphasis bit is not set  */
7752                         val = tr32(MAC_SERDES_CFG);
7753                         val &= 0xfffff000;
7754                         val |= 0x880;
7755                         tw32(MAC_SERDES_CFG, val);
7756                 }
7757                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7758                         tw32(MAC_SERDES_CFG, 0x616000);
7759         }
7760
7761         /* Prevent chip from dropping frames when flow control
7762          * is enabled.
7763          */
7764         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7765
7766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7767             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7768                 /* Use hardware link auto-negotiation */
7769                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7770         }
7771
7772         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7773             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7774                 u32 tmp;
7775
7776                 tmp = tr32(SERDES_RX_CTRL);
7777                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7778                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7779                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7780                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7781         }
7782
7783         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7784                 if (tp->link_config.phy_is_low_power) {
7785                         tp->link_config.phy_is_low_power = 0;
7786                         tp->link_config.speed = tp->link_config.orig_speed;
7787                         tp->link_config.duplex = tp->link_config.orig_duplex;
7788                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7789                 }
7790
7791                 err = tg3_setup_phy(tp, 0);
7792                 if (err)
7793                         return err;
7794
7795                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7796                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7797                         u32 tmp;
7798
7799                         /* Clear CRC stats. */
7800                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7801                                 tg3_writephy(tp, MII_TG3_TEST1,
7802                                              tmp | MII_TG3_TEST1_CRC_EN);
7803                                 tg3_readphy(tp, 0x14, &tmp);
7804                         }
7805                 }
7806         }
7807
7808         __tg3_set_rx_mode(tp->dev);
7809
7810         /* Initialize receive rules. */
7811         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7812         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7813         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7814         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7815
7816         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7817             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7818                 limit = 8;
7819         else
7820                 limit = 16;
7821         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7822                 limit -= 4;
7823         switch (limit) {
7824         case 16:
7825                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7826         case 15:
7827                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7828         case 14:
7829                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7830         case 13:
7831                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7832         case 12:
7833                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7834         case 11:
7835                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7836         case 10:
7837                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7838         case 9:
7839                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7840         case 8:
7841                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7842         case 7:
7843                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7844         case 6:
7845                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7846         case 5:
7847                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7848         case 4:
7849                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7850         case 3:
7851                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7852         case 2:
7853         case 1:
7854
7855         default:
7856                 break;
7857         }
7858
7859         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7860                 /* Write our heartbeat update interval to APE. */
7861                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7862                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7863
7864         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7865
7866         return 0;
7867 }
7868
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Thin wrapper around tg3_reset_hw(): selects the clocks, resets the
 * NIC SRAM memory window to offset zero, then performs the full
 * hardware (and optionally PHY) reset/reinit.  Returns whatever
 * tg3_reset_hw() returns (0 on success, negative errno on failure).
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Point the indirect memory window at the start of NIC SRAM. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
7880
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * low/high statistics pair PSTAT.  The low word is added in place;
 * a carry is detected by the unsigned wrap (low < addend) and
 * propagated into the high word.  Evaluates REG exactly once.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7887
/* Fold the chip's 32-bit MAC TX/RX and RCVLPC statistics counters into
 * the 64-bit mirrors in tp->hw_stats via TG3_STAT_ADD32.  Called from
 * tg3_timer for chips whose statistics the host must poll (see the
 * 5705/5750 note in tg3_reset_hw).  No-op while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Nothing to fetch without carrier. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list-placement engine counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7928
/* Periodic driver timer, armed from tg3_open() and re-armed here on
 * every run.  Fires every tp->timer_offset jiffies and takes tp->lock.
 *
 * Responsibilities: work around the non-tagged status-block race,
 * detect a dead write-DMA engine, fetch statistics and poll link
 * state once per second, and send the ASF heartbeat every two
 * seconds.  __opaque is the struct tg3 pointer cast for the timer API.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* irq_sync set means a reconfiguration is in progress; skip
         * this tick entirely and just re-arm.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Work is marked pending; force the interrupt
                         * line so the handler picks it up.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Nothing marked pending; kick host coalescing
                         * to DMA a fresh status block to us now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine has stopped; schedule a full
                         * chip reset from process context and bail out
                         * without re-arming (reset_task restarts us).
                         */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        /* Link change is signalled either by the MI
                         * interrupt bit or the link-state-changed bit,
                         * depending on configuration.
                         */
                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link was up but the state changed... */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        /* ...or link was down but the PCS now sees a
                         * signal/sync: renegotiate either way.
                         */
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Bounce the port mode to reset
                                         * the link state machine.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
                    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
8048
8049 static int tg3_request_irq(struct tg3 *tp)
8050 {
8051         irq_handler_t fn;
8052         unsigned long flags;
8053         struct net_device *dev = tp->dev;
8054
8055         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8056                 fn = tg3_msi;
8057                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8058                         fn = tg3_msi_1shot;
8059                 flags = IRQF_SAMPLE_RANDOM;
8060         } else {
8061                 fn = tg3_interrupt;
8062                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8063                         fn = tg3_interrupt_tagged;
8064                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8065         }
8066         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8067 }
8068
/* Verify that the chip can deliver an interrupt to the host.
 * Temporarily installs tg3_test_isr, forces an interrupt through the
 * host-coalescing engine, and polls up to 5 times (10ms apart) for
 * evidence that it fired.  The regular handler is re-installed before
 * returning.  Returns 0 if an interrupt was seen, -EIO if not, or an
 * errno from request_irq()/-ENODEV if the device is not running.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap in the test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Ask host coalescing to fire an interrupt immediately. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* A non-zero interrupt mailbox or a masked PCI
                 * interrupt is taken as evidence the interrupt was
                 * delivered.
                 */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Restore the normal interrupt handler. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
8122
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored; any other failure is returned as a negative
 * errno.  Called from tg3_open() after hardware initialization.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word (SERR included). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Re-register with the INTx handler now that the MSI flag is
         * cleared.
         */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
8183
/* net_device open method (ifconfig up).  Brings the device fully up:
 * power state, DMA memory, IRQ (MSI when supported), hardware init,
 * the periodic timer, and finally interrupt enable and queue start.
 * Each failure path unwinds everything acquired before it.
 * Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        /* Bring the chip to full power before touching it. */
        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo MSI enable and DMA allocations. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status allows a slow (1 Hz) timer; the
                 * non-tagged race workaround in tg3_timer() needs
                 * 10 Hz.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                /* timer_multiplier ticks equal one second; the ASF
                 * heartbeat interval is two seconds (see tg3_timer()).
                 */
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* tg3_test_msi() falls back to INTx by itself; a
                 * non-zero return means even that recovery failed.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_phy_start(tp);

        tg3_full_lock(tp, 0);

        /* Everything is ready: arm the timer and open the gates. */
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
8319
/* Compiled-out (#if 0) bring-up debugging aid: dumps PCI config
 * state, every major hardware block's mode/status registers, the
 * SRAM ring control blocks, the host status/statistics blocks, the
 * mailboxes, and the NIC-side TX/RX descriptors to the kernel log.
 * NOTE(review): locking requirements are not shown here -- the one
 * caller (tg3_close, also #if 0'd) holds tp->lock; confirm before
 * re-enabling.
 */
#if 0
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        /* SRAM ring control blocks. */
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        /* NIC side jumbo RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
8547
8548 static struct net_device_stats *tg3_get_stats(struct net_device *);
8549 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8550
/* net_device stop method (ifconfig down).  Tears down in the reverse
 * order of tg3_open(): NAPI, pending reset work, queue, timer, chip
 * halt, IRQ/MSI, statistics snapshot, DMA memory, then power down.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        /* Make sure no reset_task (queued by tg3_timer) is running or
         * pending before tearing the device down.
         */
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Snapshot the final counters so they survive across
         * close/open cycles (the hw_stats memory is freed below).
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
8594
8595 static inline unsigned long get_stat64(tg3_stat64_t *val)
8596 {
8597         unsigned long ret;
8598
8599 #if (BITS_PER_LONG == 32)
8600         ret = val->low;
8601 #else
8602         ret = ((u64)val->high << 32) | ((u64)val->low);
8603 #endif
8604         return ret;
8605 }
8606
8607 static inline u64 get_estat64(tg3_stat64_t *val)
8608 {
8609        return ((u64)val->high << 32) | ((u64)val->low);
8610 }
8611
/* Return the cumulative receive CRC error count.
 *
 * On copper 5700/5701 chips the count is taken from the PHY: enable
 * the PHY CRC counter via MII_TG3_TEST1, read the count from PHY
 * register 0x14, and accumulate it into tp->phy_crc_errors.
 * NOTE(review): presumably the MAC's rx_fcs_errors statistic is not
 * usable on those chips, and the PHY counter is assumed to clear on
 * read -- confirm both against the chip documentation.
 * All other chips use the MAC statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
8637
/* Compute the running total for one ethtool statistic: the value
 * snapshotted at the last close (old_estats) plus the current
 * hardware counter.  Expands against the 'estats', 'old_estats' and
 * 'hw_stats' locals of the enclosing function.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_estat64(&hw_stats->member)
8641
/* ethtool -S support: fold the current hardware statistics block into
 * the totals saved across the last chip reset (tp->estats_prev) and
 * return the combined counters.  If the hardware stats block has not
 * been mapped yet (device never brought up), only the saved snapshot
 * is available.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	/* Receive size-histogram buckets. */
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit MAC counters, including the per-retry collision
	 * histogram. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive list placement state machine counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8729
8730 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8731 {
8732         struct tg3 *tp = netdev_priv(dev);
8733         struct net_device_stats *stats = &tp->net_stats;
8734         struct net_device_stats *old_stats = &tp->net_stats_prev;
8735         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8736
8737         if (!hw_stats)
8738                 return old_stats;
8739
8740         stats->rx_packets = old_stats->rx_packets +
8741                 get_stat64(&hw_stats->rx_ucast_packets) +
8742                 get_stat64(&hw_stats->rx_mcast_packets) +
8743                 get_stat64(&hw_stats->rx_bcast_packets);
8744
8745         stats->tx_packets = old_stats->tx_packets +
8746                 get_stat64(&hw_stats->tx_ucast_packets) +
8747                 get_stat64(&hw_stats->tx_mcast_packets) +
8748                 get_stat64(&hw_stats->tx_bcast_packets);
8749
8750         stats->rx_bytes = old_stats->rx_bytes +
8751                 get_stat64(&hw_stats->rx_octets);
8752         stats->tx_bytes = old_stats->tx_bytes +
8753                 get_stat64(&hw_stats->tx_octets);
8754
8755         stats->rx_errors = old_stats->rx_errors +
8756                 get_stat64(&hw_stats->rx_errors);
8757         stats->tx_errors = old_stats->tx_errors +
8758                 get_stat64(&hw_stats->tx_errors) +
8759                 get_stat64(&hw_stats->tx_mac_errors) +
8760                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8761                 get_stat64(&hw_stats->tx_discards);
8762
8763         stats->multicast = old_stats->multicast +
8764                 get_stat64(&hw_stats->rx_mcast_packets);
8765         stats->collisions = old_stats->collisions +
8766                 get_stat64(&hw_stats->tx_collisions);
8767
8768         stats->rx_length_errors = old_stats->rx_length_errors +
8769                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8770                 get_stat64(&hw_stats->rx_undersize_packets);
8771
8772         stats->rx_over_errors = old_stats->rx_over_errors +
8773                 get_stat64(&hw_stats->rxbds_empty);
8774         stats->rx_frame_errors = old_stats->rx_frame_errors +
8775                 get_stat64(&hw_stats->rx_align_errors);
8776         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8777                 get_stat64(&hw_stats->tx_discards);
8778         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8779                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8780
8781         stats->rx_crc_errors = old_stats->rx_crc_errors +
8782                 calc_crc_errors(tp);
8783
8784         stats->rx_missed_errors = old_stats->rx_missed_errors +
8785                 get_stat64(&hw_stats->rx_discards);
8786
8787         return stats;
8788 }
8789
/* Compute the bit-reflected CRC-32 (polynomial 0xedb88320, the
 * standard IEEE 802.3 CRC) over @len bytes of @buf.  Used below to
 * derive each multicast address's bit position in the MAC hash
 * filter registers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = ~(u32)0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			/* Branchless LSB-first polynomial division step. */
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}

	return ~crc;
}
8814
8815 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8816 {
8817         /* accept or reject all multicast frames */
8818         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8819         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8820         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8821         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8822 }
8823
/* Recompute the RX filtering configuration (promiscuous mode, VLAN
 * tag stripping, multicast hash filter) from dev->flags and the
 * device's multicast list, and write it to the MAC.  Callers take
 * tg3_full_lock() first (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 7 bits of the inverted CRC select one of the
			 * 128 hash-filter bits: bits 6:5 pick the register,
			 * bits 4:0 pick the bit within it. */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when something actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8887
/* netdev set_rx_mode entry point: refresh the RX filter configuration
 * under the full device lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8899
/* Size of the buffer returned by tg3_get_regs(): the first 32 KB of
 * register space, dumped at the registers' natural offsets. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8906
/* ethtool -d support: dump selected chip registers into the
 * caller-supplied buffer, each at its hardware offset.  Skipped
 * ranges (and the entire dump while the PHY is in low-power mode)
 * read back as zero thanks to the memset() below.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Don't poke the chip while it may be powered down. */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the output cursor "p". */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump "len" bytes of registers starting at "base", placed at offset
 * "base" within the output buffer. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its hardware offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful when flash is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8979
/* ethtool get_eeprom_len hook: size of the NVRAM exposed through
 * tg3_get_eeprom()/tg3_set_eeprom(). */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8986
8987 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8988 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8989 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8990
/* ethtool -e support: read an arbitrary byte range out of NVRAM.
 * NVRAM is only readable in aligned 4-byte words, so the transfer is
 * split into an unaligned head, a run of whole words, and an
 * unaligned tail.  eeprom->len tracks bytes actually copied, even
 * when a read fails part-way through.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is unreachable while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes of the aligned word. */
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report how much was successfully read. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
9050
9051 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9052
/* ethtool -E support: write an arbitrary byte range to NVRAM.  Since
 * NVRAM writes are word based, an unaligned start or end is handled
 * by reading the surrounding words first, then writing back a merged,
 * fully aligned bounce buffer.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is unreachable while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Reject writes that didn't come from a matching get_eeprom. */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved head/tail words around the
		 * caller's data in a temporary aligned buffer. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
9110
/* ethtool get_settings hook: report supported/advertised link modes
 * and the current speed/duplex.  When phylib owns the PHY the query
 * is forwarded to it wholesale.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper devices also do 10/100; SerDes devices are fibre. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Speed/duplex are only meaningful while the interface is up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
9151
/* ethtool -s support: validate and apply new link settings.  When
 * phylib owns the PHY the request is forwarded to it; otherwise the
 * advertised/forced parameters are checked against the hardware's
 * capabilities, stored in link_config, and the PHY is reconfigured
 * if the interface is up.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* With autoneg, speed/duplex are outcomes, not inputs. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Keep a copy of the requested settings in the orig_* fields.
	 * NOTE(review): presumably restored after low-power transitions
	 * (cf. phy_is_low_power) -- confirm against the power-state code. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9207
9208 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9209 {
9210         struct tg3 *tp = netdev_priv(dev);
9211
9212         strcpy(info->driver, DRV_MODULE_NAME);
9213         strcpy(info->version, DRV_MODULE_VERSION);
9214         strcpy(info->fw_version, tp->fw_ver);
9215         strcpy(info->bus_info, pci_name(tp->pdev));
9216 }
9217
9218 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9219 {
9220         struct tg3 *tp = netdev_priv(dev);
9221
9222         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9223             device_can_wakeup(&tp->pdev->dev))
9224                 wol->supported = WAKE_MAGIC;
9225         else
9226                 wol->supported = 0;
9227         wol->wolopts = 0;
9228         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9229             device_can_wakeup(&tp->pdev->dev))
9230                 wol->wolopts = WAKE_MAGIC;
9231         memset(&wol->sopass, 0, sizeof(wol->sopass));
9232 }
9233
9234 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9235 {
9236         struct tg3 *tp = netdev_priv(dev);
9237         struct device *dp = &tp->pdev->dev;
9238
9239         if (wol->wolopts & ~WAKE_MAGIC)
9240                 return -EINVAL;
9241         if ((wol->wolopts & WAKE_MAGIC) &&
9242             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9243                 return -EINVAL;
9244
9245         spin_lock_bh(&tp->lock);
9246         if (wol->wolopts & WAKE_MAGIC) {
9247                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9248                 device_set_wakeup_enable(dp, true);
9249         } else {
9250                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9251                 device_set_wakeup_enable(dp, false);
9252         }
9253         spin_unlock_bh(&tp->lock);
9254
9255         return 0;
9256 }
9257
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
9263
/* ethtool set_msglevel hook: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
9269
/* ethtool TSO toggle.  Chips without TSO support only accept "off".
 * On HW_TSO_2-capable chips with IPv6 checksum offload, TSO6 (and on
 * the specific chip revisions listed below, ECN-capable TSO) is
 * toggled in lockstep with plain TSO.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((dev->features & NETIF_F_IPV6_CSUM) &&
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			/* Only these revisions handle TSO with ECN. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9294
/* ethtool -r support: restart link autonegotiation.  Not possible on
 * SerDes parts; forwarded to phylib when it owns the PHY.  In the
 * legacy path, autoneg is only restarted if it is already enabled or
 * the link came up via parallel detection.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice; the first read looks
		 * like a deliberate dummy read (latched PHY bits) --
		 * confirm before "simplifying" it away. */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9328
9329 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9330 {
9331         struct tg3 *tp = netdev_priv(dev);
9332
9333         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9334         ering->rx_mini_max_pending = 0;
9335         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9336                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9337         else
9338                 ering->rx_jumbo_max_pending = 0;
9339
9340         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9341
9342         ering->rx_pending = tp->rx_pending;
9343         ering->rx_mini_pending = 0;
9344         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9345                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9346         else
9347                 ering->rx_jumbo_pending = 0;
9348
9349         ering->tx_pending = tp->tx_pending;
9350 }
9351
/* ethtool -G support: resize the RX/TX rings.  The TX ring must hold
 * at least one maximally-fragmented skb (and three on TSO_BUG chips,
 * where a single TSO packet may be segmented in the driver).  If the
 * interface is up, the chip is halted and re-initialized with the new
 * sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot handle more than 64 RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it and the HW came back. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9395
9396 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9397 {
9398         struct tg3 *tp = netdev_priv(dev);
9399
9400         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9401
9402         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9403                 epause->rx_pause = 1;
9404         else
9405                 epause->rx_pause = 0;
9406
9407         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9408                 epause->tx_pause = 1;
9409         else
9410                 epause->tx_pause = 0;
9411 }
9412
/* ethtool -A support: configure flow control.  Two paths:
 *
 * phylib path: with autoneg, translate the rx/tx request into
 * Pause/Asym_Pause advertisement bits and renegotiate if the PHY is
 * connected; without autoneg, set the flowctrl bits directly and
 * reprogram flow control immediately if the interface is up.
 *
 * legacy path: update the flags/flowctrl fields under the full lock
 * and, if the interface is up, halt and re-init the chip so the new
 * settings take effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map rx/tx pause to 802.3 Pause/Asym_Pause
			 * advertisement bits. */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only renegotiate when the pause bits
				 * actually change. */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Full chip restart so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9505
9506 static u32 tg3_get_rx_csum(struct net_device *dev)
9507 {
9508         struct tg3 *tp = netdev_priv(dev);
9509         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9510 }
9511
9512 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9513 {
9514         struct tg3 *tp = netdev_priv(dev);
9515
9516         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9517                 if (data != 0)
9518                         return -EINVAL;
9519                 return 0;
9520         }
9521
9522         spin_lock_bh(&tp->lock);
9523         if (data)
9524                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9525         else
9526                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9527         spin_unlock_bh(&tp->lock);
9528
9529         return 0;
9530 }
9531
9532 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9533 {
9534         struct tg3 *tp = netdev_priv(dev);
9535
9536         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9537                 if (data != 0)
9538                         return -EINVAL;
9539                 return 0;
9540         }
9541
9542         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9543                 ethtool_op_set_tx_ipv6_csum(dev, data);
9544         else
9545                 ethtool_op_set_tx_csum(dev, data);
9546
9547         return 0;
9548 }
9549
9550 static int tg3_get_sset_count (struct net_device *dev, int sset)
9551 {
9552         switch (sset) {
9553         case ETH_SS_TEST:
9554                 return TG3_NUM_TEST;
9555         case ETH_SS_STATS:
9556                 return TG3_NUM_STATS;
9557         default:
9558                 return -EOPNOTSUPP;
9559         }
9560 }
9561
9562 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9563 {
9564         switch (stringset) {
9565         case ETH_SS_STATS:
9566                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9567                 break;
9568         case ETH_SS_TEST:
9569                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9570                 break;
9571         default:
9572                 WARN_ON(1);     /* we need a WARN() */
9573                 break;
9574         }
9575 }
9576
9577 static int tg3_phys_id(struct net_device *dev, u32 data)
9578 {
9579         struct tg3 *tp = netdev_priv(dev);
9580         int i;
9581
9582         if (!netif_running(tp->dev))
9583                 return -EAGAIN;
9584
9585         if (data == 0)
9586                 data = UINT_MAX / 2;
9587
9588         for (i = 0; i < (data * 2); i++) {
9589                 if ((i % 2) == 0)
9590                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9591                                            LED_CTRL_1000MBPS_ON |
9592                                            LED_CTRL_100MBPS_ON |
9593                                            LED_CTRL_10MBPS_ON |
9594                                            LED_CTRL_TRAFFIC_OVERRIDE |
9595                                            LED_CTRL_TRAFFIC_BLINK |
9596                                            LED_CTRL_TRAFFIC_LED);
9597
9598                 else
9599                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9600                                            LED_CTRL_TRAFFIC_OVERRIDE);
9601
9602                 if (msleep_interruptible(500))
9603                         break;
9604         }
9605         tw32(MAC_LED_CTRL, tp->led_ctrl);
9606         return 0;
9607 }
9608
/* ethtool get_ethtool_stats hook: copy sizeof(tp->estats) bytes of
 * statistics into the caller-supplied u64 array.  tg3_get_estats()
 * presumably returns a pointer into tp->estats after refreshing it
 * from hardware counters — confirm against its definition.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9615
/* Byte counts, by NVRAM layout, used by tg3_test_nvram() below:
 * the legacy EEPROM image, the three revisions of the format-1
 * self-boot image, and the hardware self-boot image (whose payload,
 * after the parity bytes are stripped, is NVRAM_SELFBOOT_DATA_SIZE).
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9622
/* Self-test: validate the NVRAM image's integrity.
 *
 * The magic word at offset 0 selects one of three layouts, each with
 * its own check:
 *   - firmware self-boot image: 8-bit sum over the image must be 0
 *     (revision 2 excludes a 4-byte field at TG3_EEPROM_SB_F1R2_MBA_OFF);
 *   - hardware self-boot image: per-byte odd-parity check;
 *   - legacy EEPROM image: two CRCs (bootstrap and manufacturing blocks).
 *
 * Returns 0 if the image passes (or is an unrecognized-but-tolerated
 * self-boot revision), -EIO on read failure or bad checksum, -ENOMEM
 * if the staging buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image type. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can check. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image, one 32-bit word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;	/* short read: err holds the read failure */

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* The image is valid iff the byte-wise sum wraps to 0. */
		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.
		 * Bytes 0, 8 and 16/17 of the raw image hold packed parity
		 * bits for the following data bytes; the inner loops pull
		 * those bits out (as zero/non-zero flags) and the i++'s
		 * skip past each parity byte so data[] gets only payload.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits, MSB first. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* 6 bits from byte 16, then 8 from byte 17. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Odd-parity check: each data byte plus its parity bit
		 * must have an odd combined number of set bits.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy EEPROM image: verify both CRC-protected regions. */

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9756
/* Seconds tg3_test_link() waits for carrier, by media type. */
#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6
9759
9760 static int tg3_test_link(struct tg3 *tp)
9761 {
9762         int i, max;
9763
9764         if (!netif_running(tp->dev))
9765                 return -ENODEV;
9766
9767         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9768                 max = TG3_SERDES_TIMEOUT_SEC;
9769         else
9770                 max = TG3_COPPER_TIMEOUT_SEC;
9771
9772         for (i = 0; i < max; i++) {
9773                 if (netif_carrier_ok(tp->dev))
9774                         return 0;
9775
9776                 if (msleep_interruptible(1000))
9777                         break;
9778         }
9779
9780         return -EIO;
9781 }
9782
/* Only test the commonly used registers.
 *
 * For each applicable table entry: save the register, write zero and
 * check that the read-only bits (read_mask) kept their value and the
 * read/write bits (write_mask) read back zero, then write all ones to
 * both masks and check the read-only bits are still unchanged while the
 * read/write bits read back as ones.  The saved value is restored in
 * every path.  Returns 0 on success, -EIO on the first failing offset.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-applicability flags (below) */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* sentinel */
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Failure path: report the offending offset and restore it. */
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
10003
10004 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10005 {
10006         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10007         int i;
10008         u32 j;
10009
10010         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10011                 for (j = 0; j < len; j += 4) {
10012                         u32 val;
10013
10014                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10015                         tg3_read_mem(tp, offset + j, &val);
10016                         if (val != test_pattern[i])
10017                                 return -EIO;
10018                 }
10019         }
10020         return 0;
10021 }
10022
/* Self-test: run the pattern test (tg3_do_mem_test) over every internal
 * memory region of the chip.  The region table is chosen by chip
 * generation; each table ends with an offset of 0xffffffff.  Returns 0
 * on success or the first non-zero tg3_do_mem_test() result.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* start of region in chip memory */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this chip generation;
	 * order matters: 5755+ before 5906 before generic 5705+.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
10076
/* Loopback path selectors for tg3_run_loopback(). */
#define TG3_MAC_LOOPBACK        0
#define TG3_PHY_LOOPBACK        1
10079
/* Self-test helper: transmit one 1514-byte test frame through the
 * selected loopback path (MAC-internal or PHY loopback) and verify that
 * an identical frame arrives on the standard receive ring.
 *
 * Returns 0 on success, -EIO if the frame is not returned intact within
 * the polling window, -ENOMEM if the test skb cannot be allocated, and
 * -EINVAL for an unknown @loopback_mode.  On 5780 the MAC loopback case
 * is skipped (returns 0) due to a hardware erratum.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop the MAC back on itself, port mode per link speed. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* Clear bit 0x20 in PHY shadow register 0x1b via the
			 * EPHY test-register window; inherited magic —
			 * presumably required for loopback on 5906.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY itself into loopback mode. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* PHY-specific link-polarity quirks on 5700. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC as destination, zeroed source and
	 * ethertype, then an incrementing byte pattern as payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	/* Make sure the MAC will accept the looped frame. */
	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the status block indices are fresh. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been both sent and received back. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: must come from the standard
	 * ring, with no error bits other than the tolerated odd-nibble
	 * MII indication.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length minus 4 (presumably the FCS) must match. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the payload byte pattern. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
10247
/* Bitmask results reported by tg3_test_loopback(). */
#define TG3_MAC_LOOPBACK_FAILED         1
#define TG3_PHY_LOOPBACK_FAILED         2
#define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
                                         TG3_PHY_LOOPBACK_FAILED)
10252
10253 static int tg3_test_loopback(struct tg3 *tp)
10254 {
10255         int err = 0;
10256         u32 cpmuctrl = 0;
10257
10258         if (!netif_running(tp->dev))
10259                 return TG3_LOOPBACK_FAILED;
10260
10261         err = tg3_reset_hw(tp, 1);
10262         if (err)
10263                 return TG3_LOOPBACK_FAILED;
10264
10265         /* Turn off gphy autopowerdown. */
10266         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10267                 tg3_phy_toggle_apd(tp, false);
10268
10269         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10270                 int i;
10271                 u32 status;
10272
10273                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10274
10275                 /* Wait for up to 40 microseconds to acquire lock. */
10276                 for (i = 0; i < 4; i++) {
10277                         status = tr32(TG3_CPMU_MUTEX_GNT);
10278                         if (status == CPMU_MUTEX_GNT_DRIVER)
10279                                 break;
10280                         udelay(10);
10281                 }
10282
10283                 if (status != CPMU_MUTEX_GNT_DRIVER)
10284                         return TG3_LOOPBACK_FAILED;
10285
10286                 /* Turn off link-based power management. */
10287                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10288                 tw32(TG3_CPMU_CTRL,
10289                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10290                                   CPMU_CTRL_LINK_AWARE_MODE));
10291         }
10292
10293         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10294                 err |= TG3_MAC_LOOPBACK_FAILED;
10295
10296         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10297                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10298
10299                 /* Release the mutex */
10300                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10301         }
10302
10303         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10304             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10305                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10306                         err |= TG3_PHY_LOOPBACK_FAILED;
10307         }
10308
10309         /* Re-enable gphy autopowerdown. */
10310         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10311                 tg3_phy_toggle_apd(tp, true);
10312
10313         return err;
10314 }
10315
/* ethtool self-test handler.
 *
 * Always runs the NVRAM and link tests.  When ETH_TEST_FL_OFFLINE is
 * requested, additionally halts the chip to run the register, memory,
 * loopback and interrupt tests, then restores the previous running
 * state.  Per-test results land in data[0..5] (1 == failed, except
 * data[4], which holds the loopback failure bitmask).
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Tests need the chip powered up; wake it if it was asleep. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before taking the chip down. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs so registers and
		 * memory can be exercised destructively.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask, not just 0/1. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs unlocked; it manages locking itself. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Put the chip back into its normal state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if we stopped it and the restart
		 * of the hardware succeeded.
		 */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Drop back into low-power mode if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10393
/* Net-device ioctl handler: MII register access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG).  When phylib manages the PHY the request
 * is forwarded to it; otherwise the registers are accessed directly
 * under tp->lock.  Returns -EOPNOTSUPP for unsupported commands and
 * for MII commands on SERDES devices (which have no PHY).
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* phylib owns the PHY: delegate, but only once it is connected. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
	}

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Reads would wake or confuse a powered-down PHY. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* tp->lock serializes MDIO access to the PHY. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers requires admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
10451
10452 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and update the
 * hardware RX mode accordingly.  The interface is quiesced around the
 * update so the RX path never sees an inconsistent vlgrp/RX-mode pair.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
10472 #endif
10473
10474 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10475 {
10476         struct tg3 *tp = netdev_priv(dev);
10477
10478         memcpy(ec, &tp->coal, sizeof(*ec));
10479         return 0;
10480 }
10481
10482 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10483 {
10484         struct tg3 *tp = netdev_priv(dev);
10485         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10486         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10487
10488         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10489                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10490                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10491                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10492                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10493         }
10494
10495         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10496             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10497             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10498             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10499             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10500             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10501             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10502             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10503             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10504             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10505                 return -EINVAL;
10506
10507         /* No rx interrupts will be generated if both are zero */
10508         if ((ec->rx_coalesce_usecs == 0) &&
10509             (ec->rx_max_coalesced_frames == 0))
10510                 return -EINVAL;
10511
10512         /* No tx interrupts will be generated if both are zero */
10513         if ((ec->tx_coalesce_usecs == 0) &&
10514             (ec->tx_max_coalesced_frames == 0))
10515                 return -EINVAL;
10516
10517         /* Only copy relevant parameters, ignore all others. */
10518         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10519         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10520         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10521         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10522         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10523         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10524         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10525         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10526         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10527
10528         if (netif_running(dev)) {
10529                 tg3_full_lock(tp, 0);
10530                 __tg3_set_coalesce(tp, &tp->coal);
10531                 tg3_full_unlock(tp);
10532         }
10533         return 0;
10534 }
10535
/* ethtool entry points for the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10568
10569 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10570 {
10571         u32 cursize, val, magic;
10572
10573         tp->nvram_size = EEPROM_CHIP_SIZE;
10574
10575         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10576                 return;
10577
10578         if ((magic != TG3_EEPROM_MAGIC) &&
10579             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10580             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10581                 return;
10582
10583         /*
10584          * Size the chip by reading offsets at increasing powers of two.
10585          * When we encounter our validation signature, we know the addressing
10586          * has wrapped around, and thus have our chip size.
10587          */
10588         cursize = 0x10;
10589
10590         while (cursize < tp->nvram_size) {
10591                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10592                         return;
10593
10594                 if (val == magic)
10595                         break;
10596
10597                 cursize <<= 1;
10598         }
10599
10600         tp->nvram_size = cursize;
10601 }
10602
10603 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10604 {
10605         u32 val;
10606
10607         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10608                 return;
10609
10610         /* Selfboot format */
10611         if (val != TG3_EEPROM_MAGIC) {
10612                 tg3_get_eeprom_size(tp);
10613                 return;
10614         }
10615
10616         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10617                 if (val != 0) {
10618                         tp->nvram_size = (val >> 16) * 1024;
10619                         return;
10620                 }
10621         }
10622         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10623 }
10624
10625 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10626 {
10627         u32 nvcfg1;
10628
10629         nvcfg1 = tr32(NVRAM_CFG1);
10630         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10631                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10632         }
10633         else {
10634                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10635                 tw32(NVRAM_CFG1, nvcfg1);
10636         }
10637
10638         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10639             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10640                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10641                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10642                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10643                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10644                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10645                                 break;
10646                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10647                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10648                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10649                                 break;
10650                         case FLASH_VENDOR_ATMEL_EEPROM:
10651                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10652                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10653                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10654                                 break;
10655                         case FLASH_VENDOR_ST:
10656                                 tp->nvram_jedecnum = JEDEC_ST;
10657                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10658                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10659                                 break;
10660                         case FLASH_VENDOR_SAIFUN:
10661                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10662                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10663                                 break;
10664                         case FLASH_VENDOR_SST_SMALL:
10665                         case FLASH_VENDOR_SST_LARGE:
10666                                 tp->nvram_jedecnum = JEDEC_SST;
10667                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10668                                 break;
10669                 }
10670         }
10671         else {
10672                 tp->nvram_jedecnum = JEDEC_ATMEL;
10673                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10674                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10675         }
10676 }
10677
10678 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10679 {
10680         u32 nvcfg1;
10681
10682         nvcfg1 = tr32(NVRAM_CFG1);
10683
10684         /* NVRAM protection for TPM */
10685         if (nvcfg1 & (1 << 27))
10686                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10687
10688         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10689                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10690                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10691                         tp->nvram_jedecnum = JEDEC_ATMEL;
10692                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10693                         break;
10694                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10695                         tp->nvram_jedecnum = JEDEC_ATMEL;
10696                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10697                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10698                         break;
10699                 case FLASH_5752VENDOR_ST_M45PE10:
10700                 case FLASH_5752VENDOR_ST_M45PE20:
10701                 case FLASH_5752VENDOR_ST_M45PE40:
10702                         tp->nvram_jedecnum = JEDEC_ST;
10703                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10704                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10705                         break;
10706         }
10707
10708         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10709                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10710                         case FLASH_5752PAGE_SIZE_256:
10711                                 tp->nvram_pagesize = 256;
10712                                 break;
10713                         case FLASH_5752PAGE_SIZE_512:
10714                                 tp->nvram_pagesize = 512;
10715                                 break;
10716                         case FLASH_5752PAGE_SIZE_1K:
10717                                 tp->nvram_pagesize = 1024;
10718                                 break;
10719                         case FLASH_5752PAGE_SIZE_2K:
10720                                 tp->nvram_pagesize = 2048;
10721                                 break;
10722                         case FLASH_5752PAGE_SIZE_4K:
10723                                 tp->nvram_pagesize = 4096;
10724                                 break;
10725                         case FLASH_5752PAGE_SIZE_264:
10726                                 tp->nvram_pagesize = 264;
10727                                 break;
10728                 }
10729         }
10730         else {
10731                 /* For eeprom, set pagesize to maximum eeprom size */
10732                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10733
10734                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10735                 tw32(NVRAM_CFG1, nvcfg1);
10736         }
10737 }
10738
10739 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10740 {
10741         u32 nvcfg1, protect = 0;
10742
10743         nvcfg1 = tr32(NVRAM_CFG1);
10744
10745         /* NVRAM protection for TPM */
10746         if (nvcfg1 & (1 << 27)) {
10747                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10748                 protect = 1;
10749         }
10750
10751         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10752         switch (nvcfg1) {
10753                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10754                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10755                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10756                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10757                         tp->nvram_jedecnum = JEDEC_ATMEL;
10758                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10759                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10760                         tp->nvram_pagesize = 264;
10761                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10762                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10763                                 tp->nvram_size = (protect ? 0x3e200 :
10764                                                   TG3_NVRAM_SIZE_512KB);
10765                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10766                                 tp->nvram_size = (protect ? 0x1f200 :
10767                                                   TG3_NVRAM_SIZE_256KB);
10768                         else
10769                                 tp->nvram_size = (protect ? 0x1f200 :
10770                                                   TG3_NVRAM_SIZE_128KB);
10771                         break;
10772                 case FLASH_5752VENDOR_ST_M45PE10:
10773                 case FLASH_5752VENDOR_ST_M45PE20:
10774                 case FLASH_5752VENDOR_ST_M45PE40:
10775                         tp->nvram_jedecnum = JEDEC_ST;
10776                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10777                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10778                         tp->nvram_pagesize = 256;
10779                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10780                                 tp->nvram_size = (protect ?
10781                                                   TG3_NVRAM_SIZE_64KB :
10782                                                   TG3_NVRAM_SIZE_128KB);
10783                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10784                                 tp->nvram_size = (protect ?
10785                                                   TG3_NVRAM_SIZE_64KB :
10786                                                   TG3_NVRAM_SIZE_256KB);
10787                         else
10788                                 tp->nvram_size = (protect ?
10789                                                   TG3_NVRAM_SIZE_128KB :
10790                                                   TG3_NVRAM_SIZE_512KB);
10791                         break;
10792         }
10793 }
10794
10795 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10796 {
10797         u32 nvcfg1;
10798
10799         nvcfg1 = tr32(NVRAM_CFG1);
10800
10801         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10802                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10803                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10804                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10805                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10806                         tp->nvram_jedecnum = JEDEC_ATMEL;
10807                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10808                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10809
10810                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10811                         tw32(NVRAM_CFG1, nvcfg1);
10812                         break;
10813                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10814                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10815                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10816                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10817                         tp->nvram_jedecnum = JEDEC_ATMEL;
10818                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10819                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10820                         tp->nvram_pagesize = 264;
10821                         break;
10822                 case FLASH_5752VENDOR_ST_M45PE10:
10823                 case FLASH_5752VENDOR_ST_M45PE20:
10824                 case FLASH_5752VENDOR_ST_M45PE40:
10825                         tp->nvram_jedecnum = JEDEC_ST;
10826                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10827                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10828                         tp->nvram_pagesize = 256;
10829                         break;
10830         }
10831 }
10832
10833 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10834 {
10835         u32 nvcfg1, protect = 0;
10836
10837         nvcfg1 = tr32(NVRAM_CFG1);
10838
10839         /* NVRAM protection for TPM */
10840         if (nvcfg1 & (1 << 27)) {
10841                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10842                 protect = 1;
10843         }
10844
10845         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10846         switch (nvcfg1) {
10847                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10848                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10849                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10850                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10851                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10852                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10853                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10854                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10855                         tp->nvram_jedecnum = JEDEC_ATMEL;
10856                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10857                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10858                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10859                         tp->nvram_pagesize = 256;
10860                         break;
10861                 case FLASH_5761VENDOR_ST_A_M45PE20:
10862                 case FLASH_5761VENDOR_ST_A_M45PE40:
10863                 case FLASH_5761VENDOR_ST_A_M45PE80:
10864                 case FLASH_5761VENDOR_ST_A_M45PE16:
10865                 case FLASH_5761VENDOR_ST_M_M45PE20:
10866                 case FLASH_5761VENDOR_ST_M_M45PE40:
10867                 case FLASH_5761VENDOR_ST_M_M45PE80:
10868                 case FLASH_5761VENDOR_ST_M_M45PE16:
10869                         tp->nvram_jedecnum = JEDEC_ST;
10870                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10871                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10872                         tp->nvram_pagesize = 256;
10873                         break;
10874         }
10875
10876         if (protect) {
10877                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10878         } else {
10879                 switch (nvcfg1) {
10880                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10881                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10882                         case FLASH_5761VENDOR_ST_A_M45PE16:
10883                         case FLASH_5761VENDOR_ST_M_M45PE16:
10884                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10885                                 break;
10886                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10887                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10888                         case FLASH_5761VENDOR_ST_A_M45PE80:
10889                         case FLASH_5761VENDOR_ST_M_M45PE80:
10890                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10891                                 break;
10892                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10893                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10894                         case FLASH_5761VENDOR_ST_A_M45PE40:
10895                         case FLASH_5761VENDOR_ST_M_M45PE40:
10896                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10897                                 break;
10898                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10899                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10900                         case FLASH_5761VENDOR_ST_A_M45PE20:
10901                         case FLASH_5761VENDOR_ST_M_M45PE20:
10902                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10903                                 break;
10904                 }
10905         }
10906 }
10907
10908 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10909 {
10910         tp->nvram_jedecnum = JEDEC_ATMEL;
10911         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10912         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10913 }
10914
10915 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10916 {
10917         u32 nvcfg1;
10918
10919         nvcfg1 = tr32(NVRAM_CFG1);
10920
10921         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10922         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10923         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10924                 tp->nvram_jedecnum = JEDEC_ATMEL;
10925                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10926                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10927
10928                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10929                 tw32(NVRAM_CFG1, nvcfg1);
10930                 return;
10931         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10932         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10933         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10934         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10935         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10936         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10937         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10938                 tp->nvram_jedecnum = JEDEC_ATMEL;
10939                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10940                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10941
10942                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10943                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10944                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10945                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10946                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10947                         break;
10948                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10949                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10950                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10951                         break;
10952                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10953                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10954                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10955                         break;
10956                 }
10957                 break;
10958         case FLASH_5752VENDOR_ST_M45PE10:
10959         case FLASH_5752VENDOR_ST_M45PE20:
10960         case FLASH_5752VENDOR_ST_M45PE40:
10961                 tp->nvram_jedecnum = JEDEC_ST;
10962                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10963                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10964
10965                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10966                 case FLASH_5752VENDOR_ST_M45PE10:
10967                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10968                         break;
10969                 case FLASH_5752VENDOR_ST_M45PE20:
10970                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10971                         break;
10972                 case FLASH_5752VENDOR_ST_M45PE40:
10973                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10974                         break;
10975                 }
10976                 break;
10977         default:
10978                 return;
10979         }
10980
10981         switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10982         case FLASH_5752PAGE_SIZE_256:
10983                 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10984                 tp->nvram_pagesize = 256;
10985                 break;
10986         case FLASH_5752PAGE_SIZE_512:
10987                 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10988                 tp->nvram_pagesize = 512;
10989                 break;
10990         case FLASH_5752PAGE_SIZE_1K:
10991                 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10992                 tp->nvram_pagesize = 1024;
10993                 break;
10994         case FLASH_5752PAGE_SIZE_2K:
10995                 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10996                 tp->nvram_pagesize = 2048;
10997                 break;
10998         case FLASH_5752PAGE_SIZE_4K:
10999                 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11000                 tp->nvram_pagesize = 4096;
11001                 break;
11002         case FLASH_5752PAGE_SIZE_264:
11003                 tp->nvram_pagesize = 264;
11004                 break;
11005         case FLASH_5752PAGE_SIZE_528:
11006                 tp->nvram_pagesize = 528;
11007                 break;
11008         }
11009 }
11010
11011 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11012 static void __devinit tg3_nvram_init(struct tg3 *tp)
11013 {
11014         tw32_f(GRC_EEPROM_ADDR,
11015              (EEPROM_ADDR_FSM_RESET |
11016               (EEPROM_DEFAULT_CLOCK_PERIOD <<
11017                EEPROM_ADDR_CLKPERD_SHIFT)));
11018
11019         msleep(1);
11020
11021         /* Enable seeprom accesses. */
11022         tw32_f(GRC_LOCAL_CTRL,
11023              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11024         udelay(100);
11025
11026         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11027             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11028                 tp->tg3_flags |= TG3_FLAG_NVRAM;
11029
11030                 if (tg3_nvram_lock(tp)) {
11031                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
11032                                "tg3_nvram_init failed.\n", tp->dev->name);
11033                         return;
11034                 }
11035                 tg3_enable_nvram_access(tp);
11036
11037                 tp->nvram_size = 0;
11038
11039                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11040                         tg3_get_5752_nvram_info(tp);
11041                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11042                         tg3_get_5755_nvram_info(tp);
11043                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11044                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11045                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11046                         tg3_get_5787_nvram_info(tp);
11047                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11048                         tg3_get_5761_nvram_info(tp);
11049                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11050                         tg3_get_5906_nvram_info(tp);
11051                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11052                         tg3_get_57780_nvram_info(tp);
11053                 else
11054                         tg3_get_nvram_info(tp);
11055
11056                 if (tp->nvram_size == 0)
11057                         tg3_get_nvram_size(tp);
11058
11059                 tg3_disable_nvram_access(tp);
11060                 tg3_nvram_unlock(tp);
11061
11062         } else {
11063                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11064
11065                 tg3_get_eeprom_size(tp);
11066         }
11067 }
11068
11069 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
11070                                         u32 offset, u32 *val)
11071 {
11072         u32 tmp;
11073         int i;
11074
11075         if (offset > EEPROM_ADDR_ADDR_MASK ||
11076             (offset % 4) != 0)
11077                 return -EINVAL;
11078
11079         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
11080                                         EEPROM_ADDR_DEVID_MASK |
11081                                         EEPROM_ADDR_READ);
11082         tw32(GRC_EEPROM_ADDR,
11083              tmp |
11084              (0 << EEPROM_ADDR_DEVID_SHIFT) |
11085              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
11086               EEPROM_ADDR_ADDR_MASK) |
11087              EEPROM_ADDR_READ | EEPROM_ADDR_START);
11088
11089         for (i = 0; i < 1000; i++) {
11090                 tmp = tr32(GRC_EEPROM_ADDR);
11091
11092                 if (tmp & EEPROM_ADDR_COMPLETE)
11093                         break;
11094                 msleep(1);
11095         }
11096         if (!(tmp & EEPROM_ADDR_COMPLETE))
11097                 return -EBUSY;
11098
11099         *val = tr32(GRC_EEPROM_DATA);
11100         return 0;
11101 }
11102
11103 #define NVRAM_CMD_TIMEOUT 10000
11104
11105 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
11106 {
11107         int i;
11108
11109         tw32(NVRAM_CMD, nvram_cmd);
11110         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
11111                 udelay(10);
11112                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
11113                         udelay(10);
11114                         break;
11115                 }
11116         }
11117         if (i == NVRAM_CMD_TIMEOUT) {
11118                 return -EBUSY;
11119         }
11120         return 0;
11121 }
11122
11123 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
11124 {
11125         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11126             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11127             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
11128            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
11129             (tp->nvram_jedecnum == JEDEC_ATMEL))
11130
11131                 addr = ((addr / tp->nvram_pagesize) <<
11132                         ATMEL_AT45DB0X1B_PAGE_POS) +
11133                        (addr % tp->nvram_pagesize);
11134
11135         return addr;
11136 }
11137
11138 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
11139 {
11140         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11141             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11142             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
11143            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
11144             (tp->nvram_jedecnum == JEDEC_ATMEL))
11145
11146                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11147                         tp->nvram_pagesize) +
11148                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11149
11150         return addr;
11151 }
11152
11153 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11154 {
11155         int ret;
11156
11157         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11158                 return tg3_nvram_read_using_eeprom(tp, offset, val);
11159
11160         offset = tg3_nvram_phys_addr(tp, offset);
11161
11162         if (offset > NVRAM_ADDR_MSK)
11163                 return -EINVAL;
11164
11165         ret = tg3_nvram_lock(tp);
11166         if (ret)
11167                 return ret;
11168
11169         tg3_enable_nvram_access(tp);
11170
11171         tw32(NVRAM_ADDR, offset);
11172         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11173                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11174
11175         if (ret == 0)
11176                 *val = swab32(tr32(NVRAM_RDDATA));
11177
11178         tg3_disable_nvram_access(tp);
11179
11180         tg3_nvram_unlock(tp);
11181
11182         return ret;
11183 }
11184
11185 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11186 {
11187         u32 v;
11188         int res = tg3_nvram_read(tp, offset, &v);
11189         if (!res)
11190                 *val = cpu_to_le32(v);
11191         return res;
11192 }
11193
11194 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11195 {
11196         int err;
11197         u32 tmp;
11198
11199         err = tg3_nvram_read(tp, offset, &tmp);
11200         *val = swab32(tmp);
11201         return err;
11202 }
11203
/* Write @len bytes from @buf to the legacy SEEPROM starting at
 * @offset, one 32-bit word at a time, via the GRC EEPROM state
 * machine.  Caller guarantees dword alignment of offset and length.
 * Returns 0 on success or -EBUSY if a word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __le32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /* Data register is loaded before the address/start write. */
                tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

                val = tr32(GRC_EEPROM_ADDR);
                /* NOTE(review): COMPLETE is written back here, presumably
                 * to clear status from a prior transaction -- confirm
                 * against the register spec.
                 */
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll up to ~1 second for this word write to finish. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
11246
/* offset and length are dword aligned */
/* Write to an unbuffered flash part using a read-modify-erase-write
 * cycle on whole pages: read the page into a scratch buffer, overlay
 * the caller's data, erase the page, then write it back word by word.
 * Returns 0 on success or the first error from a read/command.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        /* Scratch buffer holds one full flash page. */
        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                /* Start address of the page containing 'offset'. */
                phy_addr = offset & ~pagemask;

                /* Read the current contents of the whole page. */
                for (j = 0; j < pagesize; j += 4) {
                        if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
                                                (__le32 *) (tmp + j))))
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                /* Overlay the caller's data onto the page image.
                 * NOTE(review): 'buf' is not advanced between loop
                 * iterations, so a write spanning multiple pages would
                 * repeat the same source bytes -- confirm callers never
                 * cross a page boundary here.
                 */
                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Write the merged page back one word at a time,
                 * marking the first and last words of the burst.
                 */
                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));
                        /* swab32(le32_to_cpu(data)), actually */
                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                                break;
                }
                if (ret)
                        break;
        }

        /* Drop write-enable regardless of outcome (best effort). */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
11343
/* offset and length are dword aligned */
/* Write to a buffered flash part (or plain EEPROM) one 32-bit word
 * at a time, framing each page burst with FIRST/LAST command bits.
 * Returns 0 on success or the first command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                /* Offset of this word within its flash page. */
                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* FIRST at the start of a page or of the transfer;
                 * LAST at the end of a page or of the transfer.
                 */
                if ((page_off == 0) || (i == 0))
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* ST parts on pre-5752/5755 designs require an explicit
                 * write-enable before each burst.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
                    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
11394
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily deasserts the
 * EEPROM write-protect GPIO, selects the buffered, unbuffered or
 * legacy-EEPROM write path, and restores write-protect on exit.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
        int ret;

        /* Drop the write-protect GPIO for the duration of the write. */
        if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
                       ~GRC_LCLCTRL_GPIO_OUTPUT1);
                udelay(40);
        }

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
                ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
        }
        else {
                u32 grc_mode;

                ret = tg3_nvram_lock(tp);
                /* NOTE(review): this early return skips the write-protect
                 * GPIO restore at the bottom of the function -- verify
                 * that leaving WP deasserted on lock failure is intended.
                 */
                if (ret)
                        return ret;

                tg3_enable_nvram_access(tp);
                if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
                        tw32(NVRAM_WRITE1, 0x406);

                /* Enable NVRAM writes in GRC mode for the duration. */
                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

                if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
                        !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

                        ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                buf);
                }
                else {
                        ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                                buf);
                }

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);
        }

        /* Restore write protection. */
        if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(40);
        }

        return ret;
}
11449
/* Associates one PCI subsystem (vendor, device) pair with the PHY ID
 * used on that board design; consumed by lookup_by_subsys().
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};
11454
/* Board-specific PHY ID table keyed by PCI subsystem IDs.
 * NOTE(review): entries with phy_id 0 appear to be boards whose PHY
 * cannot be identified this way (likely fiber/serdes) -- confirm
 * against the consumers of lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
        { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
        { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
        { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

        /* DELL boards. */
        { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
        { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
        { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
        { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
        { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

        /* IBM boards. */
        { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11492
11493 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11494 {
11495         int i;
11496
11497         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11498                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11499                      tp->pdev->subsystem_vendor) &&
11500                     (subsys_id_to_phy_id[i].subsys_devid ==
11501                      tp->pdev->subsystem_device))
11502                         return &subsys_id_to_phy_id[i];
11503         }
11504         return NULL;
11505 }
11506
/* Read the per-board hardware configuration that bootcode left in NIC
 * SRAM (or, on 5906, in the VCPU shadow register) and translate it
 * into tp->phy_id, tp->led_ctrl and the tg3_flags* feature bits
 * (WOL, ASF, APE, serdes, RGMII options).  Finally registers the
 * resulting wakeup capability/state with the PM core.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;
        u16 pmcsr;

        /* On some early chips the SRAM cannot be accessed in D3hot state,
         * so need make sure we're in D0.
         */
        pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
        msleep(1);

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* The memory arbiter has to be enabled in order for SRAM accesses
         * to succeed.  Normally on powerup the tg3 chip firmware will make
         * sure it is enabled, but other entities such as system netboot
         * code might disable it.
         */
        val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        tp->phy_id = PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

        /* 5906 config lives in the VCPU shadow register, not SRAM. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
                        tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT))
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
                goto done;
        }

        /* Only trust SRAM contents if bootcode left its signature. */
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                /* Reassemble the PHY ID from its packed SRAM layout. */
                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                                tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
                        else
                                tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }

                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
                            tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                /* Board-specific LED override for Dell 5700/5701. */
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
                        /* Specific Arima boards are exempted from the
                         * write-protect bit.
                         */
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
                } else {
                        tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
                        tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                        (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

                if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

                if (cfg2 & (1 << 17))
                        tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
                        tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

                if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;

                if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
                                tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
                }

                if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
                        tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
        }
done:
        /* Publish the final WOL capability/enable state to the PM core. */
        device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
        device_set_wakeup_enable(&tp->pdev->dev,
                                 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
}
11719
11720 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11721 {
11722         int i;
11723         u32 val;
11724
11725         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11726         tw32(OTP_CTRL, cmd);
11727
11728         /* Wait for up to 1 ms for command to execute. */
11729         for (i = 0; i < 100; i++) {
11730                 val = tr32(OTP_STATUS);
11731                 if (val & OTP_STATUS_CMD_DONE)
11732                         break;
11733                 udelay(10);
11734         }
11735
11736         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11737 }
11738
11739 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11740  * configuration is a 32-bit value that straddles the alignment boundary.
11741  * We do two 32-bit reads and then shift and merge the results.
11742  */
11743 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11744 {
11745         u32 bhalf_otp, thalf_otp;
11746
11747         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11748
11749         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11750                 return 0;
11751
11752         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11753
11754         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11755                 return 0;
11756
11757         thalf_otp = tr32(OTP_READ_DATA);
11758
11759         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11760
11761         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11762                 return 0;
11763
11764         bhalf_otp = tr32(OTP_READ_DATA);
11765
11766         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11767 }
11768
/* Identify the PHY attached to the chip and perform initial setup:
 * read and validate the PHY ID (falling back to the subsystem table or
 * the eeprom-provided ID), flag serdes parts, optionally reset the PHY
 * and program default advertisement, and run the 5401 DSP init where
 * needed.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* When phylib manages the PHY, delegate everything to it. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Assemble the ID in the driver-specific layout that the
		 * PHY_ID_* constants and PHY_ID_MASK are defined against.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY managed directly by the driver: make sure the full
	 * advertisement is programmed and autonegotiation restarted.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings must be forced to act as
			 * the master in gigabit links.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* Rewrite the advertisement registers unconditionally;
		 * NOTE(review): redundant with the conditional writes
		 * above when advertising was incomplete — presumably
		 * kept to cover the tg3_phy_set_wirespeed() path.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this repeats the 5401 DSP init performed just
	 * above (err is 0 here whenever the first call succeeded);
	 * looks like historical redundancy — confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11899
/* Read the board part number into tp->board_part_number.  The VPD data
 * is pulled either directly from NVRAM (bootcode-style images) or via
 * the PCI VPD capability, then parsed for the "PN" keyword.  On any
 * failure a fixed fallback string is used.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Bootcode image: the VPD block sits at NVRAM offset
		 * 0x100; copy it out one 32-bit word at a time.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* Otherwise read the VPD through the PCI VPD capability. */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Bit 15 of PCI_VPD_ADDR flips to 1 when the read
			 * completes; poll up to ~100 ms for it.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82/0x91 are large-resource string tags; skip them
		 * using the 16-bit little-endian length that follows.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything but the VPD-R tag (0x90) ends the scan. */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries inside VPD-R looking for "PN";
		 * each entry is 2 keyword bytes, 1 length byte, data.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
12000
12001 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12002 {
12003         u32 val;
12004
12005         if (tg3_nvram_read_swab(tp, offset, &val) ||
12006             (val & 0xfc000000) != 0x0c000000 ||
12007             tg3_nvram_read_swab(tp, offset + 4, &val) ||
12008             val != 0)
12009                 return 0;
12010
12011         return 1;
12012 }
12013
12014 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12015 {
12016         u32 offset, major, minor, build;
12017
12018         tp->fw_ver[0] = 's';
12019         tp->fw_ver[1] = 'b';
12020         tp->fw_ver[2] = '\0';
12021
12022         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12023                 return;
12024
12025         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12026         case TG3_EEPROM_SB_REVISION_0:
12027                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12028                 break;
12029         case TG3_EEPROM_SB_REVISION_2:
12030                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12031                 break;
12032         case TG3_EEPROM_SB_REVISION_3:
12033                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12034                 break;
12035         default:
12036                 return;
12037         }
12038
12039         if (tg3_nvram_read_swab(tp, offset, &val))
12040                 return;
12041
12042         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12043                 TG3_EEPROM_SB_EDH_BLD_SHFT;
12044         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12045                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12046         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
12047
12048         if (minor > 99 || build > 26)
12049                 return;
12050
12051         snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
12052
12053         if (build > 0) {
12054                 tp->fw_ver[8] = 'a' + build - 1;
12055                 tp->fw_ver[9] = '\0';
12056         }
12057 }
12058
/* Build the firmware version string in tp->fw_ver.  Self-boot images
 * are delegated to tg3_read_sb_ver(); for bootcode images the version
 * text is copied out of NVRAM, and when ASF management firmware is
 * enabled (and the APE is not) its version is appended after ", ".
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC) {
		/* Not a bootcode image; it may be a self-boot image. */
		if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
			tg3_read_sb_ver(tp, val);

		return;
	}

	/* Word 0xc holds the image offset, word 0x4 its load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the load address; copy 16 bytes of
	 * version text into fw_ver.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF initialization entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load address. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	/* Append ", <mgmt fw version>" after the bootcode version. */
	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Truncate the final copy if it would overflow fw_ver. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
12146
12147 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12148
12149 static int __devinit tg3_get_invariants(struct tg3 *tp)
12150 {
12151         static struct pci_device_id write_reorder_chipsets[] = {
12152                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12153                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12154                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12155                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12156                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12157                              PCI_DEVICE_ID_VIA_8385_0) },
12158                 { },
12159         };
12160         u32 misc_ctrl_reg;
12161         u32 pci_state_reg, grc_misc_cfg;
12162         u32 val;
12163         u16 pci_cmd;
12164         int err;
12165
12166         /* Force memory write invalidate off.  If we leave it on,
12167          * then on 5700_BX chips we have to enable a workaround.
12168          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12169          * to match the cacheline size.  The Broadcom driver have this
12170          * workaround but turns MWI off all the times so never uses
12171          * it.  This seems to suggest that the workaround is insufficient.
12172          */
12173         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12174         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12175         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12176
12177         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12178          * has the register indirect write enable bit set before
12179          * we try to access any of the MMIO registers.  It is also
12180          * critical that the PCI-X hw workaround situation is decided
12181          * before that as well.
12182          */
12183         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12184                               &misc_ctrl_reg);
12185
12186         tp->pci_chip_rev_id = (misc_ctrl_reg >>
12187                                MISC_HOST_CTRL_CHIPREV_SHIFT);
12188         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12189                 u32 prod_id_asic_rev;
12190
12191                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12192                                       &prod_id_asic_rev);
12193                 tp->pci_chip_rev_id = prod_id_asic_rev;
12194         }
12195
12196         /* Wrong chip ID in 5752 A0. This code can be removed later
12197          * as A0 is not in production.
12198          */
12199         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12200                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12201
12202         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12203          * we need to disable memory and use config. cycles
12204          * only to access all registers. The 5702/03 chips
12205          * can mistakenly decode the special cycles from the
12206          * ICH chipsets as memory write cycles, causing corruption
12207          * of register and memory space. Only certain ICH bridges
12208          * will drive special cycles with non-zero data during the
12209          * address phase which can fall within the 5703's address
12210          * range. This is not an ICH bug as the PCI spec allows
12211          * non-zero address during special cycles. However, only
12212          * these ICH bridges are known to drive non-zero addresses
12213          * during special cycles.
12214          *
12215          * Since special cycles do not cross PCI bridges, we only
12216          * enable this workaround if the 5703 is on the secondary
12217          * bus of these ICH bridges.
12218          */
12219         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12220             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12221                 static struct tg3_dev_id {
12222                         u32     vendor;
12223                         u32     device;
12224                         u32     rev;
12225                 } ich_chipsets[] = {
12226                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12227                           PCI_ANY_ID },
12228                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12229                           PCI_ANY_ID },
12230                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12231                           0xa },
12232                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12233                           PCI_ANY_ID },
12234                         { },
12235                 };
12236                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12237                 struct pci_dev *bridge = NULL;
12238
12239                 while (pci_id->vendor != 0) {
12240                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
12241                                                 bridge);
12242                         if (!bridge) {
12243                                 pci_id++;
12244                                 continue;
12245                         }
12246                         if (pci_id->rev != PCI_ANY_ID) {
12247                                 if (bridge->revision > pci_id->rev)
12248                                         continue;
12249                         }
12250                         if (bridge->subordinate &&
12251                             (bridge->subordinate->number ==
12252                              tp->pdev->bus->number)) {
12253
12254                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12255                                 pci_dev_put(bridge);
12256                                 break;
12257                         }
12258                 }
12259         }
12260
12261         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12262                 static struct tg3_dev_id {
12263                         u32     vendor;
12264                         u32     device;
12265                 } bridge_chipsets[] = {
12266                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12267                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12268                         { },
12269                 };
12270                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12271                 struct pci_dev *bridge = NULL;
12272
12273                 while (pci_id->vendor != 0) {
12274                         bridge = pci_get_device(pci_id->vendor,
12275                                                 pci_id->device,
12276                                                 bridge);
12277                         if (!bridge) {
12278                                 pci_id++;
12279                                 continue;
12280                         }
12281                         if (bridge->subordinate &&
12282                             (bridge->subordinate->number <=
12283                              tp->pdev->bus->number) &&
12284                             (bridge->subordinate->subordinate >=
12285                              tp->pdev->bus->number)) {
12286                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12287                                 pci_dev_put(bridge);
12288                                 break;
12289                         }
12290                 }
12291         }
12292
12293         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12294          * DMA addresses > 40-bit. This bridge may have other additional
12295          * 57xx devices behind it in some 4-port NIC designs for example.
12296          * Any tg3 device found behind the bridge will also need the 40-bit
12297          * DMA workaround.
12298          */
12299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12300             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12301                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12302                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12303                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12304         }
12305         else {
12306                 struct pci_dev *bridge = NULL;
12307
12308                 do {
12309                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12310                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12311                                                 bridge);
12312                         if (bridge && bridge->subordinate &&
12313                             (bridge->subordinate->number <=
12314                              tp->pdev->bus->number) &&
12315                             (bridge->subordinate->subordinate >=
12316                              tp->pdev->bus->number)) {
12317                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12318                                 pci_dev_put(bridge);
12319                                 break;
12320                         }
12321                 } while (bridge);
12322         }
12323
12324         /* Initialize misc host control in PCI block. */
12325         tp->misc_host_ctrl |= (misc_ctrl_reg &
12326                                MISC_HOST_CTRL_CHIPREV);
12327         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12328                                tp->misc_host_ctrl);
12329
12330         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12331             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12332                 tp->pdev_peer = tg3_find_peer(tp);
12333
12334         /* Intentionally exclude ASIC_REV_5906 */
12335         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12336             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12337             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12338             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12339             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12340             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12341                 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12342
12343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12344             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12345             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12346             (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12347             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12348                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12349
12350         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12351             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12352                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12353
12354         /* 5700 B0 chips do not support checksumming correctly due
12355          * to hardware bugs.
12356          */
12357         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12358                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12359         else {
12360                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12361                 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12362                 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12363                         tp->dev->features |= NETIF_F_IPV6_CSUM;
12364         }
12365
12366         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12367                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12368                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12369                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12370                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12371                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12372                      tp->pdev_peer == tp->pdev))
12373                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12374
12375                 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12376                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12377                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12378                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12379                 } else {
12380                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12381                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12382                                 ASIC_REV_5750 &&
12383                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12384                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12385                 }
12386         }
12387
12388         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12389              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12390                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12391
12392         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12393                               &pci_state_reg);
12394
12395         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12396         if (tp->pcie_cap != 0) {
12397                 u16 lnkctl;
12398
12399                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12400
12401                 pcie_set_readrq(tp->pdev, 4096);
12402
12403                 pci_read_config_word(tp->pdev,
12404                                      tp->pcie_cap + PCI_EXP_LNKCTL,
12405                                      &lnkctl);
12406                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12407                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12408                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12409                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12410                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12411                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12412                                 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12413                 }
12414         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12415                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12416         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12417                    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12418                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12419                 if (!tp->pcix_cap) {
12420                         printk(KERN_ERR PFX "Cannot find PCI-X "
12421                                             "capability, aborting.\n");
12422                         return -EIO;
12423                 }
12424
12425                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12426                         tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12427         }
12428
12429         /* If we have an AMD 762 or VIA K8T800 chipset, write
12430          * reordering to the mailbox registers done by the host
12431          * controller can cause major troubles.  We read back from
12432          * every mailbox register write to force the writes to be
12433          * posted to the chip in order.
12434          */
12435         if (pci_dev_present(write_reorder_chipsets) &&
12436             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12437                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12438
12439         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12440                              &tp->pci_cacheline_sz);
12441         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12442                              &tp->pci_lat_timer);
12443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12444             tp->pci_lat_timer < 64) {
12445                 tp->pci_lat_timer = 64;
12446                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12447                                       tp->pci_lat_timer);
12448         }
12449
12450         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12451                 /* 5700 BX chips need to have their TX producer index
12452                  * mailboxes written twice to workaround a bug.
12453                  */
12454                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12455
12456                 /* If we are in PCI-X mode, enable register write workaround.
12457                  *
12458                  * The workaround is to use indirect register accesses
12459                  * for all chip writes not to mailbox registers.
12460                  */
12461                 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12462                         u32 pm_reg;
12463
12464                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12465
12466                         /* The chip can have it's power management PCI config
12467                          * space registers clobbered due to this bug.
12468                          * So explicitly force the chip into D0 here.
12469                          */
12470                         pci_read_config_dword(tp->pdev,
12471                                               tp->pm_cap + PCI_PM_CTRL,
12472                                               &pm_reg);
12473                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12474                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12475                         pci_write_config_dword(tp->pdev,
12476                                                tp->pm_cap + PCI_PM_CTRL,
12477                                                pm_reg);
12478
12479                         /* Also, force SERR#/PERR# in PCI command. */
12480                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12481                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12482                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12483                 }
12484         }
12485
12486         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12487                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12488         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12489                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12490
12491         /* Chip-specific fixup from Broadcom driver */
12492         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12493             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12494                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12495                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12496         }
12497
12498         /* Default fast path register access methods */
12499         tp->read32 = tg3_read32;
12500         tp->write32 = tg3_write32;
12501         tp->read32_mbox = tg3_read32;
12502         tp->write32_mbox = tg3_write32;
12503         tp->write32_tx_mbox = tg3_write32;
12504         tp->write32_rx_mbox = tg3_write32;
12505
12506         /* Various workaround register access methods */
12507         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12508                 tp->write32 = tg3_write_indirect_reg32;
12509         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12510                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12511                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12512                 /*
12513                  * Back to back register writes can cause problems on these
12514                  * chips, the workaround is to read back all reg writes
12515                  * except those to mailbox regs.
12516                  *
12517                  * See tg3_write_indirect_reg32().
12518                  */
12519                 tp->write32 = tg3_write_flush_reg32;
12520         }
12521
12522
12523         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12524             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12525                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12526                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12527                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12528         }
12529
12530         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12531                 tp->read32 = tg3_read_indirect_reg32;
12532                 tp->write32 = tg3_write_indirect_reg32;
12533                 tp->read32_mbox = tg3_read_indirect_mbox;
12534                 tp->write32_mbox = tg3_write_indirect_mbox;
12535                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12536                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12537
12538                 iounmap(tp->regs);
12539                 tp->regs = NULL;
12540
12541                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12542                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12543                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12544         }
12545         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12546                 tp->read32_mbox = tg3_read32_mbox_5906;
12547                 tp->write32_mbox = tg3_write32_mbox_5906;
12548                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12549                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12550         }
12551
12552         if (tp->write32 == tg3_write_indirect_reg32 ||
12553             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12554              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12555               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12556                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12557
12558         /* Get eeprom hw config before calling tg3_set_power_state().
12559          * In particular, the TG3_FLG2_IS_NIC flag must be
12560          * determined before calling tg3_set_power_state() so that
12561          * we know whether or not to switch out of Vaux power.
12562          * When the flag is set, it means that GPIO1 is used for eeprom
12563          * write protect and also implies that it is a LOM where GPIOs
12564          * are not used to switch power.
12565          */
12566         tg3_get_eeprom_hw_cfg(tp);
12567
12568         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12569                 /* Allow reads and writes to the
12570                  * APE register and memory space.
12571                  */
12572                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12573                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12574                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12575                                        pci_state_reg);
12576         }
12577
12578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12579             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12580             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12581             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12582                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12583
12584         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12585          * GPIO1 driven high will bring 5700's external PHY out of reset.
12586          * It is also used as eeprom write protect on LOMs.
12587          */
12588         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12589         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12590             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12591                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12592                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12593         /* Unused GPIO3 must be driven as output on 5752 because there
12594          * are no pull-up resistors on unused GPIO pins.
12595          */
12596         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12597                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12598
12599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12600             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12601                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12602
12603         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12604                 /* Turn off the debug UART. */
12605                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12606                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12607                         /* Keep VMain power. */
12608                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12609                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12610         }
12611
12612         /* Force the chip into D0. */
12613         err = tg3_set_power_state(tp, PCI_D0);
12614         if (err) {
12615                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12616                        pci_name(tp->pdev));
12617                 return err;
12618         }
12619
12620         /* Derive initial jumbo mode from MTU assigned in
12621          * ether_setup() via the alloc_etherdev() call
12622          */
12623         if (tp->dev->mtu > ETH_DATA_LEN &&
12624             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12625                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12626
12627         /* Determine WakeOnLan speed to use. */
12628         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12629             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12630             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12631             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12632                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12633         } else {
12634                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12635         }
12636
12637         /* A few boards don't want Ethernet@WireSpeed phy feature */
12638         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12639             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12640              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12641              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12642             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12643             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12644                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12645
12646         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12647             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12648                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12649         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12650                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12651
12652         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12653             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12654             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12655             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12656                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12657                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12658                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12659                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12660                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12661                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12662                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12663                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12664                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12665                 } else
12666                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12667         }
12668
12669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12670             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12671                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12672                 if (tp->phy_otp == 0)
12673                         tp->phy_otp = TG3_OTP_DEFAULT;
12674         }
12675
12676         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12677                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12678         else
12679                 tp->mi_mode = MAC_MI_MODE_BASE;
12680
12681         tp->coalesce_mode = 0;
12682         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12683             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12684                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12685
12686         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12687             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12688                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12689
12690         err = tg3_mdio_init(tp);
12691         if (err)
12692                 return err;
12693
12694         /* Initialize data/descriptor byte/word swapping. */
12695         val = tr32(GRC_MODE);
12696         val &= GRC_MODE_HOST_STACKUP;
12697         tw32(GRC_MODE, val | tp->grc_mode);
12698
12699         tg3_switch_clocks(tp);
12700
12701         /* Clear this out for sanity. */
12702         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12703
12704         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12705                               &pci_state_reg);
12706         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12707             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12708                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12709
12710                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12711                     chiprevid == CHIPREV_ID_5701_B0 ||
12712                     chiprevid == CHIPREV_ID_5701_B2 ||
12713                     chiprevid == CHIPREV_ID_5701_B5) {
12714                         void __iomem *sram_base;
12715
12716                         /* Write some dummy words into the SRAM status block
12717                          * area, see if it reads back correctly.  If the return
12718                          * value is bad, force enable the PCIX workaround.
12719                          */
12720                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12721
12722                         writel(0x00000000, sram_base);
12723                         writel(0x00000000, sram_base + 4);
12724                         writel(0xffffffff, sram_base + 4);
12725                         if (readl(sram_base) != 0x00000000)
12726                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12727                 }
12728         }
12729
12730         udelay(50);
12731         tg3_nvram_init(tp);
12732
12733         grc_misc_cfg = tr32(GRC_MISC_CFG);
12734         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12735
12736         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12737             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12738              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12739                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12740
12741         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12742             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12743                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12744         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12745                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12746                                       HOSTCC_MODE_CLRTICK_TXBD);
12747
12748                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12749                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12750                                        tp->misc_host_ctrl);
12751         }
12752
12753         /* Preserve the APE MAC_MODE bits */
12754         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12755                 tp->mac_mode = tr32(MAC_MODE) |
12756                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12757         else
12758                 tp->mac_mode = TG3_DEF_MAC_MODE;
12759
12760         /* these are limited to 10/100 only */
12761         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12762              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12763             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12764              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12765              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12766               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12767               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12768             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12769              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12770               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12771               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12772             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12773             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12774                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12775
12776         err = tg3_phy_probe(tp);
12777         if (err) {
12778                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12779                        pci_name(tp->pdev), err);
12780                 /* ... but do not return immediately ... */
12781                 tg3_mdio_fini(tp);
12782         }
12783
12784         tg3_read_partno(tp);
12785         tg3_read_fw_ver(tp);
12786
12787         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12788                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12789         } else {
12790                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12791                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12792                 else
12793                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12794         }
12795
12796         /* 5700 {AX,BX} chips have a broken status block link
12797          * change bit implementation, so we must use the
12798          * status register in those cases.
12799          */
12800         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12801                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12802         else
12803                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12804
12805         /* The led_ctrl is set during tg3_phy_probe, here we might
12806          * have to force the link status polling mechanism based
12807          * upon subsystem IDs.
12808          */
12809         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12810             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12811             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12812                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12813                                   TG3_FLAG_USE_LINKCHG_REG);
12814         }
12815
12816         /* For all SERDES we poll the MAC status register. */
12817         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12818                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12819         else
12820                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12821
12822         tp->rx_offset = NET_IP_ALIGN;
12823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12824             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12825                 tp->rx_offset = 0;
12826
12827         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12828
12829         /* Increment the rx prod index on the rx std ring by at most
12830          * 8 for these chips to workaround hw errata.
12831          */
12832         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12833             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12834             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12835                 tp->rx_std_max_post = 8;
12836
12837         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12838                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12839                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12840
12841         return err;
12842 }
12843
12844 #ifdef CONFIG_SPARC
12845 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12846 {
12847         struct net_device *dev = tp->dev;
12848         struct pci_dev *pdev = tp->pdev;
12849         struct device_node *dp = pci_device_to_OF_node(pdev);
12850         const unsigned char *addr;
12851         int len;
12852
12853         addr = of_get_property(dp, "local-mac-address", &len);
12854         if (addr && len == 6) {
12855                 memcpy(dev->dev_addr, addr, 6);
12856                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12857                 return 0;
12858         }
12859         return -ENODEV;
12860 }
12861
12862 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12863 {
12864         struct net_device *dev = tp->dev;
12865
12866         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12867         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12868         return 0;
12869 }
12870 #endif
12871
/* Determine the device's MAC address, trying sources in decreasing
 * order of trust: the SPARC OpenFirmware property, the NIC SRAM
 * mailbox written by bootcode, NVRAM, and finally the live MAC
 * address registers.  Returns 0 on success and fills dev->dev_addr
 * and dev->perm_addr; returns -EINVAL if no valid address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	/* On SPARC the firmware device tree is authoritative. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC devices store the second function's address
		 * at offset 0xcc instead.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Take and release the NVRAM arbitration lock; if the
		 * lock cannot be obtained, reset the NVRAM command
		 * state instead of unlocking.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" -- presumably the bootcode signature
	 * marking valid mailbox contents.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte ordering differs from
		 * the mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Fall back to the system-wide IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12946
12947 #define BOUNDARY_SINGLE_CACHELINE       1
12948 #define BOUNDARY_MULTI_CACHELINE        2
12949
/* Merge the appropriate DMA read/write boundary bits into the
 * DMA_RWCTRL value @val, based on the bus type (plain PCI, PCI-X or
 * PCI Express), the PCI cache line size register, and a per-arch
 * burst policy.  Returns @val with the boundary fields filled in.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE counts 32-bit words; 0 means the BIOS
	 * never programmed it, in which case assume 1024 bytes.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Choose the burst policy by host architecture; 0 means "no
	 * constraint" and leaves @val untouched.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary encodings are chip-specific; note the
		 * intentional fallthrough from the small sizes into 128.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: each case falls through to the next larger
		 * boundary when the goal is not a single cache line, so
		 * non-single goals land on 256 bytes or larger.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
13086
/* Run one DMA transaction between host memory and NIC SRAM using a
 * hand-built internal buffer descriptor, to probe the DMA engine.
 *
 * @buf:       kernel virtual address of the host test buffer (unused here
 *             beyond documentation; the chip uses @buf_dma)
 * @buf_dma:   bus/DMA address of the same buffer
 * @size:      number of bytes to transfer
 * @to_device: non-zero = host-to-NIC (read DMA engine),
 *             zero     = NIC-to-host (write DMA engine)
 *
 * Returns 0 when the completion FIFO reports the descriptor within the
 * polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA status before starting. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor points at the host buffer; 0x2100 is the NIC-side
	 * SRAM mbuf address used for the test.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the appropriate DMA-high FIFO to start the transfer. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll up to 40 * 100us for the completion FIFO to echo back the
	 * SRAM descriptor address, signalling completion.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
13167
#define TEST_BUFFER_SIZE	0x2000

/* Exercise the DMA engine with a write/read round trip through host
 * memory and tune tp->dma_rwctrl for the bus the chip sits on.
 *
 * Returns 0 on success (the round-trip test itself only runs on
 * 5700/5701), -ENOMEM if the test buffer cannot be allocated, or
 * -ENODEV if DMA corruption persists even at the safest (16-byte)
 * write boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command values, then fold in the DMA
	 * boundary bits computed from the host cacheline size.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus- and chip-specific watermark setup. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (those bits are reassigned
	 * on these chips -- see the ASSERT_ALL_BE note below).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write DMA bug probed below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: retry once with the conservative
			 * 16-byte write boundary before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
13356
13357 static void __devinit tg3_init_link_config(struct tg3 *tp)
13358 {
13359         tp->link_config.advertising =
13360                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13361                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13362                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13363                  ADVERTISED_Autoneg | ADVERTISED_MII);
13364         tp->link_config.speed = SPEED_INVALID;
13365         tp->link_config.duplex = DUPLEX_INVALID;
13366         tp->link_config.autoneg = AUTONEG_ENABLE;
13367         tp->link_config.active_speed = SPEED_INVALID;
13368         tp->link_config.active_duplex = DUPLEX_INVALID;
13369         tp->link_config.phy_is_low_power = 0;
13370         tp->link_config.orig_speed = SPEED_INVALID;
13371         tp->link_config.orig_duplex = DUPLEX_INVALID;
13372         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13373 }
13374
13375 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13376 {
13377         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13378                 tp->bufmgr_config.mbuf_read_dma_low_water =
13379                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13380                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13381                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13382                 tp->bufmgr_config.mbuf_high_water =
13383                         DEFAULT_MB_HIGH_WATER_5705;
13384                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13385                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13386                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13387                         tp->bufmgr_config.mbuf_high_water =
13388                                 DEFAULT_MB_HIGH_WATER_5906;
13389                 }
13390
13391                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13392                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13393                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13394                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13395                 tp->bufmgr_config.mbuf_high_water_jumbo =
13396                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13397         } else {
13398                 tp->bufmgr_config.mbuf_read_dma_low_water =
13399                         DEFAULT_MB_RDMA_LOW_WATER;
13400                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13401                         DEFAULT_MB_MACRX_LOW_WATER;
13402                 tp->bufmgr_config.mbuf_high_water =
13403                         DEFAULT_MB_HIGH_WATER;
13404
13405                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13406                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13407                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13408                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13409                 tp->bufmgr_config.mbuf_high_water_jumbo =
13410                         DEFAULT_MB_HIGH_WATER_JUMBO;
13411         }
13412
13413         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13414         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13415 }
13416
13417 static char * __devinit tg3_phy_string(struct tg3 *tp)
13418 {
13419         switch (tp->phy_id & PHY_ID_MASK) {
13420         case PHY_ID_BCM5400:    return "5400";
13421         case PHY_ID_BCM5401:    return "5401";
13422         case PHY_ID_BCM5411:    return "5411";
13423         case PHY_ID_BCM5701:    return "5701";
13424         case PHY_ID_BCM5703:    return "5703";
13425         case PHY_ID_BCM5704:    return "5704";
13426         case PHY_ID_BCM5705:    return "5705";
13427         case PHY_ID_BCM5750:    return "5750";
13428         case PHY_ID_BCM5752:    return "5752";
13429         case PHY_ID_BCM5714:    return "5714";
13430         case PHY_ID_BCM5780:    return "5780";
13431         case PHY_ID_BCM5755:    return "5755";
13432         case PHY_ID_BCM5787:    return "5787";
13433         case PHY_ID_BCM5784:    return "5784";
13434         case PHY_ID_BCM5756:    return "5722/5756";
13435         case PHY_ID_BCM5906:    return "5906";
13436         case PHY_ID_BCM5761:    return "5761";
13437         case PHY_ID_BCM8002:    return "8002/serdes";
13438         case 0:                 return "serdes";
13439         default:                return "unknown";
13440         }
13441 }
13442
13443 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13444 {
13445         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13446                 strcpy(str, "PCI Express");
13447                 return str;
13448         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13449                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13450
13451                 strcpy(str, "PCIX:");
13452
13453                 if ((clock_ctrl == 7) ||
13454                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13455                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13456                         strcat(str, "133MHz");
13457                 else if (clock_ctrl == 0)
13458                         strcat(str, "33MHz");
13459                 else if (clock_ctrl == 2)
13460                         strcat(str, "50MHz");
13461                 else if (clock_ctrl == 4)
13462                         strcat(str, "66MHz");
13463                 else if (clock_ctrl == 6)
13464                         strcat(str, "100MHz");
13465         } else {
13466                 strcpy(str, "PCI:");
13467                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13468                         strcat(str, "66MHz");
13469                 else
13470                         strcat(str, "33MHz");
13471         }
13472         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13473                 strcat(str, ":32-bit");
13474         else
13475                 strcat(str, ":64-bit");
13476         return str;
13477 }
13478
13479 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13480 {
13481         struct pci_dev *peer;
13482         unsigned int func, devnr = tp->pdev->devfn & ~7;
13483
13484         for (func = 0; func < 8; func++) {
13485                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13486                 if (peer && peer != tp->pdev)
13487                         break;
13488                 pci_dev_put(peer);
13489         }
13490         /* 5704 can be configured in single-port mode, set peer to
13491          * tp->pdev in that case.
13492          */
13493         if (!peer) {
13494                 peer = tp->pdev;
13495                 return peer;
13496         }
13497
13498         /*
13499          * We don't need to keep the refcount elevated; there's no way
13500          * to remove one half of this device without removing the other
13501          */
13502         pci_dev_put(peer);
13503
13504         return peer;
13505 }
13506
13507 static void __devinit tg3_init_coal(struct tg3 *tp)
13508 {
13509         struct ethtool_coalesce *ec = &tp->coal;
13510
13511         memset(ec, 0, sizeof(*ec));
13512         ec->cmd = ETHTOOL_GCOALESCE;
13513         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13514         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13515         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13516         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13517         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13518         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13519         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13520         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13521         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13522
13523         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13524                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13525                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13526                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13527                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13528                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13529         }
13530
13531         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13532                 ec->rx_coalesce_usecs_irq = 0;
13533                 ec->tx_coalesce_usecs_irq = 0;
13534                 ec->stats_block_coalesce_usecs = 0;
13535         }
13536 }
13537
/* net_device callbacks using the plain tg3_start_xmit() transmit path.
 * Selected in tg3_init_one() for 5755-plus parts and the 5906.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13556
/* Identical callback set except for the transmit hook, which uses
 * tg3_start_xmit_dma_bug().  Selected in tg3_init_one() for chips not
 * covered by tg3_netdev_ops (presumably ones with a TX DMA erratum --
 * the exact bug is handled in the xmit routine, not visible here).
 */
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13575
13576 static int __devinit tg3_init_one(struct pci_dev *pdev,
13577                                   const struct pci_device_id *ent)
13578 {
13579         static int tg3_version_printed = 0;
13580         struct net_device *dev;
13581         struct tg3 *tp;
13582         int err, pm_cap;
13583         char str[40];
13584         u64 dma_mask, persist_dma_mask;
13585
13586         if (tg3_version_printed++ == 0)
13587                 printk(KERN_INFO "%s", version);
13588
13589         err = pci_enable_device(pdev);
13590         if (err) {
13591                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13592                        "aborting.\n");
13593                 return err;
13594         }
13595
13596         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13597         if (err) {
13598                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13599                        "aborting.\n");
13600                 goto err_out_disable_pdev;
13601         }
13602
13603         pci_set_master(pdev);
13604
13605         /* Find power-management capability. */
13606         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13607         if (pm_cap == 0) {
13608                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13609                        "aborting.\n");
13610                 err = -EIO;
13611                 goto err_out_free_res;
13612         }
13613
13614         dev = alloc_etherdev(sizeof(*tp));
13615         if (!dev) {
13616                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13617                 err = -ENOMEM;
13618                 goto err_out_free_res;
13619         }
13620
13621         SET_NETDEV_DEV(dev, &pdev->dev);
13622
13623 #if TG3_VLAN_TAG_USED
13624         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13625 #endif
13626
13627         tp = netdev_priv(dev);
13628         tp->pdev = pdev;
13629         tp->dev = dev;
13630         tp->pm_cap = pm_cap;
13631         tp->rx_mode = TG3_DEF_RX_MODE;
13632         tp->tx_mode = TG3_DEF_TX_MODE;
13633
13634         if (tg3_debug > 0)
13635                 tp->msg_enable = tg3_debug;
13636         else
13637                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13638
13639         /* The word/byte swap controls here control register access byte
13640          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13641          * setting below.
13642          */
13643         tp->misc_host_ctrl =
13644                 MISC_HOST_CTRL_MASK_PCI_INT |
13645                 MISC_HOST_CTRL_WORD_SWAP |
13646                 MISC_HOST_CTRL_INDIR_ACCESS |
13647                 MISC_HOST_CTRL_PCISTATE_RW;
13648
13649         /* The NONFRM (non-frame) byte/word swap controls take effect
13650          * on descriptor entries, anything which isn't packet data.
13651          *
13652          * The StrongARM chips on the board (one for tx, one for rx)
13653          * are running in big-endian mode.
13654          */
13655         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13656                         GRC_MODE_WSWAP_NONFRM_DATA);
13657 #ifdef __BIG_ENDIAN
13658         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13659 #endif
13660         spin_lock_init(&tp->lock);
13661         spin_lock_init(&tp->indirect_lock);
13662         INIT_WORK(&tp->reset_task, tg3_reset_task);
13663
13664         tp->regs = pci_ioremap_bar(pdev, BAR_0);
13665         if (!tp->regs) {
13666                 printk(KERN_ERR PFX "Cannot map device registers, "
13667                        "aborting.\n");
13668                 err = -ENOMEM;
13669                 goto err_out_free_dev;
13670         }
13671
13672         tg3_init_link_config(tp);
13673
13674         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13675         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13676         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13677
13678         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13679         dev->ethtool_ops = &tg3_ethtool_ops;
13680         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13681         dev->irq = pdev->irq;
13682
13683         err = tg3_get_invariants(tp);
13684         if (err) {
13685                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13686                        "aborting.\n");
13687                 goto err_out_iounmap;
13688         }
13689
13690         if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13691             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13692                 dev->netdev_ops = &tg3_netdev_ops;
13693         else
13694                 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13695
13696
13697         /* The EPB bridge inside 5714, 5715, and 5780 and any
13698          * device behind the EPB cannot support DMA addresses > 40-bit.
13699          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13700          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13701          * do DMA address check in tg3_start_xmit().
13702          */
13703         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13704                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13705         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13706                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13707 #ifdef CONFIG_HIGHMEM
13708                 dma_mask = DMA_64BIT_MASK;
13709 #endif
13710         } else
13711                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13712
13713         /* Configure DMA attributes. */
13714         if (dma_mask > DMA_32BIT_MASK) {
13715                 err = pci_set_dma_mask(pdev, dma_mask);
13716                 if (!err) {
13717                         dev->features |= NETIF_F_HIGHDMA;
13718                         err = pci_set_consistent_dma_mask(pdev,
13719                                                           persist_dma_mask);
13720                         if (err < 0) {
13721                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13722                                        "DMA for consistent allocations\n");
13723                                 goto err_out_iounmap;
13724                         }
13725                 }
13726         }
13727         if (err || dma_mask == DMA_32BIT_MASK) {
13728                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13729                 if (err) {
13730                         printk(KERN_ERR PFX "No usable DMA configuration, "
13731                                "aborting.\n");
13732                         goto err_out_iounmap;
13733                 }
13734         }
13735
13736         tg3_init_bufmgr_config(tp);
13737
13738         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13739                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13740         }
13741         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13742             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13743             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13744             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13745             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13746                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13747         } else {
13748                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13749         }
13750
13751         /* TSO is on by default on chips that support hardware TSO.
13752          * Firmware TSO on older chips gives lower performance, so it
13753          * is off by default, but can be enabled using ethtool.
13754          */
13755         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13756                 if (dev->features & NETIF_F_IP_CSUM)
13757                         dev->features |= NETIF_F_TSO;
13758                 if ((dev->features & NETIF_F_IPV6_CSUM) &&
13759                     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
13760                         dev->features |= NETIF_F_TSO6;
13761                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13762                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13763                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13764                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13765                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13766                         dev->features |= NETIF_F_TSO_ECN;
13767         }
13768
13769
13770         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13771             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13772             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13773                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13774                 tp->rx_pending = 63;
13775         }
13776
13777         err = tg3_get_device_address(tp);
13778         if (err) {
13779                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13780                        "aborting.\n");
13781                 goto err_out_iounmap;
13782         }
13783
13784         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13785                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13786                 if (!tp->aperegs) {
13787                         printk(KERN_ERR PFX "Cannot map APE registers, "
13788                                "aborting.\n");
13789                         err = -ENOMEM;
13790                         goto err_out_iounmap;
13791                 }
13792
13793                 tg3_ape_lock_init(tp);
13794         }
13795
13796         /*
13797          * Reset chip in case UNDI or EFI driver did not shutdown
13798          * DMA self test will enable WDMAC and we'll see (spurious)
13799          * pending DMA on the PCI bus at that point.
13800          */
13801         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13802             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13803                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13804                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13805         }
13806
13807         err = tg3_test_dma(tp);
13808         if (err) {
13809                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13810                 goto err_out_apeunmap;
13811         }
13812
13813         /* flow control autonegotiation is default behavior */
13814         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13815         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13816
13817         tg3_init_coal(tp);
13818
13819         pci_set_drvdata(pdev, dev);
13820
13821         err = register_netdev(dev);
13822         if (err) {
13823                 printk(KERN_ERR PFX "Cannot register net device, "
13824                        "aborting.\n");
13825                 goto err_out_apeunmap;
13826         }
13827
13828         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13829                dev->name,
13830                tp->board_part_number,
13831                tp->pci_chip_rev_id,
13832                tg3_bus_string(tp, str),
13833                dev->dev_addr);
13834
13835         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13836                 printk(KERN_INFO
13837                        "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13838                        tp->dev->name,
13839                        tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13840                        dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13841         else
13842                 printk(KERN_INFO
13843                        "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13844                        tp->dev->name, tg3_phy_string(tp),
13845                        ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13846                         ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13847                          "10/100/1000Base-T")),
13848                        (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13849
13850         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13851                dev->name,
13852                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13853                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13854                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13855                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13856                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13857         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13858                dev->name, tp->dma_rwctrl,
13859                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13860                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13861
13862         return 0;
13863
13864 err_out_apeunmap:
13865         if (tp->aperegs) {
13866                 iounmap(tp->aperegs);
13867                 tp->aperegs = NULL;
13868         }
13869
13870 err_out_iounmap:
13871         if (tp->regs) {
13872                 iounmap(tp->regs);
13873                 tp->regs = NULL;
13874         }
13875
13876 err_out_free_dev:
13877         free_netdev(dev);
13878
13879 err_out_free_res:
13880         pci_release_regions(pdev);
13881
13882 err_out_disable_pdev:
13883         pci_disable_device(pdev);
13884         pci_set_drvdata(pdev, NULL);
13885         return err;
13886 }
13887
/*
 * PCI remove callback: undo everything tg3_init_one() set up, in reverse
 * order.  Safe to call with a NULL drvdata (probe failure path clears it).
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		/* Ensure no deferred reset_task is still running before
		 * we start tearing the device down.
		 */
		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		/* Unregister first so the stack can no longer reach the
		 * device, then release register mappings and resources.
		 */
		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
13917
/*
 * PCI suspend callback: quiesce the device and put it into the target
 * low-power state.  If the power transition fails, the hardware is
 * restarted so the interface keeps working and the error is returned.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, the PHY state machine, and the data path
	 * before touching the hardware.
	 */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	/* Without PM capability we can only park the chip in D3hot. */
	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed: bring the hardware back up so
		 * the interface remains usable, then report the original
		 * error.  err2 tracks the recovery attempt separately.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY only if the hardware recovery worked. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13979
13980 static int tg3_resume(struct pci_dev *pdev)
13981 {
13982         struct net_device *dev = pci_get_drvdata(pdev);
13983         struct tg3 *tp = netdev_priv(dev);
13984         int err;
13985
13986         pci_restore_state(tp->pdev);
13987
13988         if (!netif_running(dev))
13989                 return 0;
13990
13991         err = tg3_set_power_state(tp, PCI_D0);
13992         if (err)
13993                 return err;
13994
13995         netif_device_attach(dev);
13996
13997         tg3_full_lock(tp, 0);
13998
13999         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14000         err = tg3_restart_hw(tp, 1);
14001         if (err)
14002                 goto out;
14003
14004         tp->timer.expires = jiffies + tp->timer_offset;
14005         add_timer(&tp->timer);
14006
14007         tg3_netif_start(tp);
14008
14009 out:
14010         tg3_full_unlock(tp);
14011
14012         if (!err)
14013                 tg3_phy_start(tp);
14014
14015         return err;
14016 }
14017
/* PCI driver descriptor: binds tg3_pci_tbl device IDs to the probe,
 * remove, and legacy power-management entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
14026
/* Module entry point: register the PCI driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
14031
/* Module exit point: unregister the PCI driver, detaching all devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
14036
/* Wire the init/exit functions into the module load/unload sequence. */
module_init(tg3_init);
module_exit(tg3_cleanup);