/*
 * net: Move flow control definitions to mii.h
 * [linux-2.6] / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.96"
73 #define DRV_MODULE_RELDATE      "November 21, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 #define TG3_RAW_IP_ALIGN 2
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
214         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
215         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
216         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
217         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
220         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
221         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
222         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
223         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
224         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
225         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
226         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
227         {}
228 };
229
230 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
231
232 static const struct {
233         const char string[ETH_GSTRING_LEN];
234 } ethtool_stats_keys[TG3_NUM_STATS] = {
235         { "rx_octets" },
236         { "rx_fragments" },
237         { "rx_ucast_packets" },
238         { "rx_mcast_packets" },
239         { "rx_bcast_packets" },
240         { "rx_fcs_errors" },
241         { "rx_align_errors" },
242         { "rx_xon_pause_rcvd" },
243         { "rx_xoff_pause_rcvd" },
244         { "rx_mac_ctrl_rcvd" },
245         { "rx_xoff_entered" },
246         { "rx_frame_too_long_errors" },
247         { "rx_jabbers" },
248         { "rx_undersize_packets" },
249         { "rx_in_length_errors" },
250         { "rx_out_length_errors" },
251         { "rx_64_or_less_octet_packets" },
252         { "rx_65_to_127_octet_packets" },
253         { "rx_128_to_255_octet_packets" },
254         { "rx_256_to_511_octet_packets" },
255         { "rx_512_to_1023_octet_packets" },
256         { "rx_1024_to_1522_octet_packets" },
257         { "rx_1523_to_2047_octet_packets" },
258         { "rx_2048_to_4095_octet_packets" },
259         { "rx_4096_to_8191_octet_packets" },
260         { "rx_8192_to_9022_octet_packets" },
261
262         { "tx_octets" },
263         { "tx_collisions" },
264
265         { "tx_xon_sent" },
266         { "tx_xoff_sent" },
267         { "tx_flow_control" },
268         { "tx_mac_errors" },
269         { "tx_single_collisions" },
270         { "tx_mult_collisions" },
271         { "tx_deferred" },
272         { "tx_excessive_collisions" },
273         { "tx_late_collisions" },
274         { "tx_collide_2times" },
275         { "tx_collide_3times" },
276         { "tx_collide_4times" },
277         { "tx_collide_5times" },
278         { "tx_collide_6times" },
279         { "tx_collide_7times" },
280         { "tx_collide_8times" },
281         { "tx_collide_9times" },
282         { "tx_collide_10times" },
283         { "tx_collide_11times" },
284         { "tx_collide_12times" },
285         { "tx_collide_13times" },
286         { "tx_collide_14times" },
287         { "tx_collide_15times" },
288         { "tx_ucast_packets" },
289         { "tx_mcast_packets" },
290         { "tx_bcast_packets" },
291         { "tx_carrier_sense_errors" },
292         { "tx_discards" },
293         { "tx_errors" },
294
295         { "dma_writeq_full" },
296         { "dma_write_prioq_full" },
297         { "rxbds_empty" },
298         { "rx_discards" },
299         { "rx_errors" },
300         { "rx_threshold_hit" },
301
302         { "dma_readq_full" },
303         { "dma_read_prioq_full" },
304         { "tx_comp_queue_full" },
305
306         { "ring_set_send_prod_index" },
307         { "ring_status_update" },
308         { "nic_irqs" },
309         { "nic_avoided_irqs" },
310         { "nic_tx_threshold_hit" }
311 };
312
313 static const struct {
314         const char string[ETH_GSTRING_LEN];
315 } ethtool_test_keys[TG3_NUM_TEST] = {
316         { "nvram test     (online) " },
317         { "link test      (online) " },
318         { "register test  (offline)" },
319         { "memory test    (offline)" },
320         { "loopback test  (offline)" },
321         { "interrupt test (offline)" },
322 };
323
324 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
325 {
326         writel(val, tp->regs + off);
327 }
328
329 static u32 tg3_read32(struct tg3 *tp, u32 off)
330 {
331         return (readl(tp->regs + off));
332 }
333
334 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
335 {
336         writel(val, tp->aperegs + off);
337 }
338
339 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
340 {
341         return (readl(tp->aperegs + off));
342 }
343
344 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
345 {
346         unsigned long flags;
347
348         spin_lock_irqsave(&tp->indirect_lock, flags);
349         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
350         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
351         spin_unlock_irqrestore(&tp->indirect_lock, flags);
352 }
353
354 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
355 {
356         writel(val, tp->regs + off);
357         readl(tp->regs + off);
358 }
359
360 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
361 {
362         unsigned long flags;
363         u32 val;
364
365         spin_lock_irqsave(&tp->indirect_lock, flags);
366         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
367         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
368         spin_unlock_irqrestore(&tp->indirect_lock, flags);
369         return val;
370 }
371
372 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
373 {
374         unsigned long flags;
375
376         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
377                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
378                                        TG3_64BIT_REG_LOW, val);
379                 return;
380         }
381         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
382                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
383                                        TG3_64BIT_REG_LOW, val);
384                 return;
385         }
386
387         spin_lock_irqsave(&tp->indirect_lock, flags);
388         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
390         spin_unlock_irqrestore(&tp->indirect_lock, flags);
391
392         /* In indirect mode when disabling interrupts, we also need
393          * to clear the interrupt bit in the GRC local ctrl register.
394          */
395         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
396             (val == 0x1)) {
397                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
398                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
399         }
400 }
401
402 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
403 {
404         unsigned long flags;
405         u32 val;
406
407         spin_lock_irqsave(&tp->indirect_lock, flags);
408         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
409         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
410         spin_unlock_irqrestore(&tp->indirect_lock, flags);
411         return val;
412 }
413
414 /* usec_wait specifies the wait time in usec when writing to certain registers
415  * where it is unsafe to read back the register without some delay.
416  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
417  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
418  */
419 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
420 {
421         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
422             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
423                 /* Non-posted methods */
424                 tp->write32(tp, off, val);
425         else {
426                 /* Posted method */
427                 tg3_write32(tp, off, val);
428                 if (usec_wait)
429                         udelay(usec_wait);
430                 tp->read32(tp, off);
431         }
432         /* Wait again after the read for the posted method to guarantee that
433          * the wait time is met.
434          */
435         if (usec_wait)
436                 udelay(usec_wait);
437 }
438
439 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
440 {
441         tp->write32_mbox(tp, off, val);
442         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
443             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
444                 tp->read32_mbox(tp, off);
445 }
446
447 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
448 {
449         void __iomem *mbox = tp->regs + off;
450         writel(val, mbox);
451         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
452                 writel(val, mbox);
453         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
454                 readl(mbox);
455 }
456
457 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
458 {
459         return (readl(tp->regs + off + GRCMBOX_BASE));
460 }
461
462 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
463 {
464         writel(val, tp->regs + off + GRCMBOX_BASE);
465 }
466
/* Shorthand accessors: all register/mailbox traffic funnels through the
 * per-device function pointers so the indirect/workaround variants can
 * be swapped in at probe time.  These expect a local `tp` in scope.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
477
478 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
479 {
480         unsigned long flags;
481
482         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
483             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
484                 return;
485
486         spin_lock_irqsave(&tp->indirect_lock, flags);
487         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
488                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
489                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
490
491                 /* Always leave this as zero. */
492                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
493         } else {
494                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
495                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
496
497                 /* Always leave this as zero. */
498                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
499         }
500         spin_unlock_irqrestore(&tp->indirect_lock, flags);
501 }
502
503 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
504 {
505         unsigned long flags;
506
507         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
508             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
509                 *val = 0;
510                 return;
511         }
512
513         spin_lock_irqsave(&tp->indirect_lock, flags);
514         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
515                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
516                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
517
518                 /* Always leave this as zero. */
519                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
520         } else {
521                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
522                 *val = tr32(TG3PCI_MEM_WIN_DATA);
523
524                 /* Always leave this as zero. */
525                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
526         }
527         spin_unlock_irqrestore(&tp->indirect_lock, flags);
528 }
529
530 static void tg3_ape_lock_init(struct tg3 *tp)
531 {
532         int i;
533
534         /* Make sure the driver hasn't any stale locks. */
535         for (i = 0; i < 8; i++)
536                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
537                                 APE_LOCK_GRANT_DRIVER);
538 }
539
540 static int tg3_ape_lock(struct tg3 *tp, int locknum)
541 {
542         int i, off;
543         int ret = 0;
544         u32 status;
545
546         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
547                 return 0;
548
549         switch (locknum) {
550                 case TG3_APE_LOCK_GRC:
551                 case TG3_APE_LOCK_MEM:
552                         break;
553                 default:
554                         return -EINVAL;
555         }
556
557         off = 4 * locknum;
558
559         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
560
561         /* Wait for up to 1 millisecond to acquire lock. */
562         for (i = 0; i < 100; i++) {
563                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
564                 if (status == APE_LOCK_GRANT_DRIVER)
565                         break;
566                 udelay(10);
567         }
568
569         if (status != APE_LOCK_GRANT_DRIVER) {
570                 /* Revoke the lock request. */
571                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
572                                 APE_LOCK_GRANT_DRIVER);
573
574                 ret = -EBUSY;
575         }
576
577         return ret;
578 }
579
580 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
581 {
582         int off;
583
584         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
585                 return;
586
587         switch (locknum) {
588                 case TG3_APE_LOCK_GRC:
589                 case TG3_APE_LOCK_MEM:
590                         break;
591                 default:
592                         return;
593         }
594
595         off = 4 * locknum;
596         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
597 }
598
599 static void tg3_disable_ints(struct tg3 *tp)
600 {
601         tw32(TG3PCI_MISC_HOST_CTRL,
602              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
603         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
604 }
605
606 static inline void tg3_cond_int(struct tg3 *tp)
607 {
608         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
609             (tp->hw_status->status & SD_STATUS_UPDATED))
610                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
611         else
612                 tw32(HOSTCC_MODE, tp->coalesce_mode |
613                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
614 }
615
616 static void tg3_enable_ints(struct tg3 *tp)
617 {
618         tp->irq_sync = 0;
619         wmb();
620
621         tw32(TG3PCI_MISC_HOST_CTRL,
622              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
623         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
624                        (tp->last_tag << 24));
625         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
626                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
627                                (tp->last_tag << 24));
628         tg3_cond_int(tp);
629 }
630
631 static inline unsigned int tg3_has_work(struct tg3 *tp)
632 {
633         struct tg3_hw_status *sblk = tp->hw_status;
634         unsigned int work_exists = 0;
635
636         /* check for phy events */
637         if (!(tp->tg3_flags &
638               (TG3_FLAG_USE_LINKCHG_REG |
639                TG3_FLAG_POLL_SERDES))) {
640                 if (sblk->status & SD_STATUS_LINK_CHG)
641                         work_exists = 1;
642         }
643         /* check for RX/TX work to do */
644         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
645             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
646                 work_exists = 1;
647
648         return work_exists;
649 }
650
651 /* tg3_restart_ints
652  *  similar to tg3_enable_ints, but it accurately determines whether there
653  *  is new work pending and can return without flushing the PIO write
654  *  which reenables interrupts
655  */
656 static void tg3_restart_ints(struct tg3 *tp)
657 {
658         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
659                      tp->last_tag << 24);
660         mmiowb();
661
662         /* When doing tagged status, this work check is unnecessary.
663          * The last_tag we write above tells the chip which piece of
664          * work we've completed.
665          */
666         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
667             tg3_has_work(tp))
668                 tw32(HOSTCC_MODE, tp->coalesce_mode |
669                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
670 }
671
672 static inline void tg3_netif_stop(struct tg3 *tp)
673 {
674         tp->dev->trans_start = jiffies; /* prevent tx timeout */
675         napi_disable(&tp->napi);
676         netif_tx_disable(tp->dev);
677 }
678
679 static inline void tg3_netif_start(struct tg3 *tp)
680 {
681         netif_wake_queue(tp->dev);
682         /* NOTE: unconditional netif_wake_queue is only appropriate
683          * so long as all callers are assured to have free tx slots
684          * (such as after tg3_init_hw)
685          */
686         napi_enable(&tp->napi);
687         tp->hw_status->status |= SD_STATUS_UPDATED;
688         tg3_enable_ints(tp);
689 }
690
691 static void tg3_switch_clocks(struct tg3 *tp)
692 {
693         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
694         u32 orig_clock_ctrl;
695
696         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
697             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
698                 return;
699
700         orig_clock_ctrl = clock_ctrl;
701         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
702                        CLOCK_CTRL_CLKRUN_OENABLE |
703                        0x1f);
704         tp->pci_clock_ctrl = clock_ctrl;
705
706         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
707                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
708                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
709                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
710                 }
711         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
712                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
713                             clock_ctrl |
714                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
715                             40);
716                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
717                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
718                             40);
719         }
720         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
721 }
722
723 #define PHY_BUSY_LOOPS  5000
724
725 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
726 {
727         u32 frame_val;
728         unsigned int loops;
729         int ret;
730
731         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
732                 tw32_f(MAC_MI_MODE,
733                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
734                 udelay(80);
735         }
736
737         *val = 0x0;
738
739         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
740                       MI_COM_PHY_ADDR_MASK);
741         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
742                       MI_COM_REG_ADDR_MASK);
743         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
744
745         tw32_f(MAC_MI_COM, frame_val);
746
747         loops = PHY_BUSY_LOOPS;
748         while (loops != 0) {
749                 udelay(10);
750                 frame_val = tr32(MAC_MI_COM);
751
752                 if ((frame_val & MI_COM_BUSY) == 0) {
753                         udelay(5);
754                         frame_val = tr32(MAC_MI_COM);
755                         break;
756                 }
757                 loops -= 1;
758         }
759
760         ret = -EBUSY;
761         if (loops != 0) {
762                 *val = frame_val & MI_COM_DATA_MASK;
763                 ret = 0;
764         }
765
766         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
767                 tw32_f(MAC_MI_MODE, tp->mi_mode);
768                 udelay(80);
769         }
770
771         return ret;
772 }
773
/* Write @val to PHY register @reg through the MAC's MII management
 * (MDIO) interface.
 *
 * If MAC autopolling is enabled it is switched off for the duration
 * of the transaction and restored afterwards, so the write cannot
 * race with the MAC's own background PHY polling.
 *
 * Returns 0 on success, -EBUSY if the MI_COM interface does not go
 * idle within PHY_BUSY_LOOPS polls.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* On the 5906, writes to these two registers are silently
	 * dropped and reported as success.
	 * NOTE(review): presumably these registers must not be
	 * touched on this chip -- confirm against the 5906 errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Temporarily disable autopolling while we own the bus. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, then the
	 * write command plus the start bit.
	 */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the interface drops its busy bit. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
822
823 static int tg3_bmcr_reset(struct tg3 *tp)
824 {
825         u32 phy_control;
826         int limit, err;
827
828         /* OK, reset it, and poll the BMCR_RESET bit until it
829          * clears or we time out.
830          */
831         phy_control = BMCR_RESET;
832         err = tg3_writephy(tp, MII_BMCR, phy_control);
833         if (err != 0)
834                 return -EBUSY;
835
836         limit = 5000;
837         while (limit--) {
838                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
839                 if (err != 0)
840                         return -EBUSY;
841
842                 if ((phy_control & BMCR_RESET) == 0) {
843                         udelay(40);
844                         break;
845                 }
846                 udelay(10);
847         }
848         if (limit <= 0)
849                 return -EBUSY;
850
851         return 0;
852 }
853
854 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
855 {
856         struct tg3 *tp = (struct tg3 *)bp->priv;
857         u32 val;
858
859         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
860                 return -EAGAIN;
861
862         if (tg3_readphy(tp, reg, &val))
863                 return -EIO;
864
865         return val;
866 }
867
868 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
869 {
870         struct tg3 *tp = (struct tg3 *)bp->priv;
871
872         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
873                 return -EAGAIN;
874
875         if (tg3_writephy(tp, reg, val))
876                 return -EIO;
877
878         return 0;
879 }
880
/* phylib MDIO-bus reset callback: no bus-level reset is required for
 * this hardware, so simply report success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
885
/* Program the 5785 MAC's PHY-specific configuration (LED modes and,
 * for RGMII PHYs, in-band status signalling) to match the attached
 * PHY type.  Called after the MDIO bus is up and a PHY driver is
 * bound.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	/* Pick the MAC_PHYCFG2 LED-mode value for the known PHYs;
	 * bail out for anything unrecognized.
	 */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII PHYs only need the LED modes programmed and the
	 * RGMII interrupt disabled.
	 */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~MAC_PHYCFG1_RGMII_INT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless standard in-band
	 * signalling has been explicitly disabled by board config.
	 */
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* Select external in-band decode/status options when standard
	 * in-band signalling is disabled.
	 */
	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	/* Mirror the in-band choices into the extended RGMII mode
	 * register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
960
/* Resume MDIO bus activity: clear the PAUSED flag (under the mdio
 * lock if the bus is registered), turn off MAC autopolling, and
 * reapply the 5785 PHY configuration where applicable.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	/* Autopolling must be off while the driver drives the bus. */
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
977
978 static void tg3_mdio_stop(struct tg3 *tp)
979 {
980         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
981                 mutex_lock(&tp->mdio_bus->mdio_lock);
982                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
983                 mutex_unlock(&tp->mdio_bus->mdio_lock);
984         }
985 }
986
/* Allocate and register the MDIO bus for phylib-managed devices.
 *
 * Always (re)starts MDIO activity via tg3_mdio_start(); the rest is
 * a no-op (returning 0) unless TG3_FLG3_USE_PHYLIB is set and the
 * bus has not already been registered.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when
 * no usable PHY/driver is found, or the mdiobus_register() error.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id encodes the PCI bus number and devfn. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Scan only the single known PHY address. */
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Record the MAC-PHY interface mode for the known PHYs and
	 * propagate the board's RGMII in-band quirks to the PHY.
	 */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		break;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1070
1071 static void tg3_mdio_fini(struct tg3 *tp)
1072 {
1073         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1074                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1075                 mdiobus_unregister(tp->mdio_bus);
1076                 mdiobus_free(tp->mdio_bus);
1077                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1078         }
1079 }
1080
1081 /* tp->lock is held. */
1082 static inline void tg3_generate_fw_event(struct tg3 *tp)
1083 {
1084         u32 val;
1085
1086         val = tr32(GRC_RX_CPU_EVENT);
1087         val |= GRC_RX_CPU_DRIVER_EVENT;
1088         tw32_f(GRC_RX_CPU_EVENT, val);
1089
1090         tp->last_event_jiffies = jiffies;
1091 }
1092
1093 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1094
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to
 * acknowledge the previous driver event by clearing
 * GRC_RX_CPU_DRIVER_EVENT, returning as soon as the bit clears.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8 us steps (matches the udelay(8) below). */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1121
/* tp->lock is held. */
/* Report link state to the management firmware (ASF) on 5780-class
 * devices: copy the key MII registers (BMCR/BMSR, ADVERTISE/LPA,
 * 1000BASE-T ctrl/status, PHYADDR) into the firmware command mailbox
 * and raise a driver event.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure the firmware consumed the previous event first. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Payload length in bytes for the data words below. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half.  A
	 * failed PHY read leaves the corresponding half zero.
	 */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement / link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper PHYs only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1169
/* Log the current link state (down, or up with speed/duplex and the
 * resolved flow-control setting) and forward it to the management
 * firmware via tg3_ump_link_report().  Console output is gated on
 * the netif_msg_link() message level.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
1197
1198 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1199 {
1200         u16 miireg;
1201
1202         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1203                 miireg = ADVERTISE_PAUSE_CAP;
1204         else if (flow_ctrl & FLOW_CTRL_TX)
1205                 miireg = ADVERTISE_PAUSE_ASYM;
1206         else if (flow_ctrl & FLOW_CTRL_RX)
1207                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208         else
1209                 miireg = 0;
1210
1211         return miireg;
1212 }
1213
1214 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1215 {
1216         u16 miireg;
1217
1218         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1219                 miireg = ADVERTISE_1000XPAUSE;
1220         else if (flow_ctrl & FLOW_CTRL_TX)
1221                 miireg = ADVERTISE_1000XPSE_ASYM;
1222         else if (flow_ctrl & FLOW_CTRL_RX)
1223                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1224         else
1225                 miireg = 0;
1226
1227         return miireg;
1228 }
1229
1230 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1231 {
1232         u8 cap = 0;
1233
1234         if (lcladv & ADVERTISE_PAUSE_CAP) {
1235                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1236                         if (rmtadv & LPA_PAUSE_CAP)
1237                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1238                         else if (rmtadv & LPA_PAUSE_ASYM)
1239                                 cap = TG3_FLOW_CTRL_RX;
1240                 } else {
1241                         if (rmtadv & LPA_PAUSE_CAP)
1242                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1243                 }
1244         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1245                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1246                         cap = TG3_FLOW_CTRL_TX;
1247         }
1248
1249         return cap;
1250 }
1251
1252 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1253 {
1254         u8 cap = 0;
1255
1256         if (lcladv & ADVERTISE_1000XPAUSE) {
1257                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1258                         if (rmtadv & LPA_1000XPAUSE)
1259                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1260                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1261                                 cap = FLOW_CTRL_RX;
1262                 } else {
1263                         if (rmtadv & LPA_1000XPAUSE)
1264                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1265                 }
1266         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1267                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1268                         cap = FLOW_CTRL_TX;
1269         }
1270
1271         return cap;
1272 }
1273
/* Resolve and apply TX/RX flow control.
 *
 * @lcladv/@rmtadv carry the local and link-partner pause
 * advertisement bits (copper or 1000BASE-X form depending on the
 * port type).  They are only consulted when pause autonegotiation is
 * in effect; otherwise the statically configured
 * tp->link_config.flowctrl wins.  The resolved setting is recorded
 * in link_config.active_flowctrl and the MAC RX/TX mode registers
 * are rewritten only when their value actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg state lives in the phy_device. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Skip the register write when nothing changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1313
/* phylib link-change callback.
 *
 * Re-derives the MAC mode (port mode, duplex), flow control, the
 * 5785 MI status polling mode, and the TX slot-time settings from
 * the current phy_device state, then emits a link message if
 * anything user-visible (link, speed, duplex, flow control) changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build the pause advertisement
			 * pair from our config and the partner's
			 * reported pause capabilities.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* 5785 wants the MI status register tuned for 10 Mbps. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit uses an extended slot time (0xff). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* Report after dropping the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1394
/* Connect the MAC to its PHY via phylib (one-time).
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change handler, and masks the PHY's advertised features down
 * to what the MAC supports for the interface type.
 *
 * Returns 0 on success (or if already connected), the phy_connect()
 * error, or -EINVAL for an unsupported interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only boards fall back to the MII feature set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1442
/* (Re)start the PHY state machine and kick off autonegotiation.
 * When coming out of a low-power state, the saved
 * speed/duplex/autoneg/advertising configuration is restored to the
 * phy_device before starting it.  No-op if no PHY is connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1464
1465 static void tg3_phy_stop(struct tg3 *tp)
1466 {
1467         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1468                 return;
1469
1470         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1471 }
1472
1473 static void tg3_phy_fini(struct tg3 *tp)
1474 {
1475         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1476                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1477                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1478         }
1479 }
1480
/* Write @val to indirect DSP register @reg: the address register is
 * loaded first, then the data port.  The two writes must stay in
 * this order.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1486
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow registers: SCR5 (energy-detect options) is programmed
 * first, then APD control with an 84 ms wake timer.  Pre-5705
 * devices are left untouched.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		return;

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL auto power-down is kept set except when enabling APD on
	 * a 5784.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1514
/* Enable or disable automatic MDI crossover on the PHY.
 *
 * The 5906's internal ethernet PHY is accessed via the EPHY shadow
 * scheme (shadow-enable, modify, restore); all other eligible chips
 * use a read-modify-write of the AUX_CTRL misc shadow register with
 * the write-enable bit set.  SerDes and pre-5705 devices are left
 * untouched.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Expose the shadow register bank. */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			/* Restore the original test register value. */
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1552
/* Enable the PHY's ethernet@wirespeed feature unless the board flags
 * it as unsupported.
 * NOTE(review): 0x7007 appears to select an AUX_CTRL shadow register
 * and bits 15/4 to enable the feature and commit the write -- confirm
 * against the Broadcom PHY shadow-register documentation.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1565
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * value stored in tp->phy_otp: each bit-field is extracted and
 * written to its DSP register.  The SM_DSP clock is enabled for the
 * duration of the writes and switched off again afterwards.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target, plus its default bit. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1608
1609 static int tg3_wait_macro_done(struct tg3 *tp)
1610 {
1611         int limit = 100;
1612
1613         while (limit--) {
1614                 u32 tmp32;
1615
1616                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1617                         if ((tmp32 & 0x1000) == 0)
1618                                 break;
1619                 }
1620         }
1621         if (limit <= 0)
1622                 return -EBUSY;
1623
1624         return 0;
1625 }
1626
1627 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1628 {
1629         static const u32 test_pat[4][6] = {
1630         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1631         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1632         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1633         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1634         };
1635         int chan;
1636
1637         for (chan = 0; chan < 4; chan++) {
1638                 int i;
1639
1640                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1641                              (chan * 0x2000) | 0x0200);
1642                 tg3_writephy(tp, 0x16, 0x0002);
1643
1644                 for (i = 0; i < 6; i++)
1645                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1646                                      test_pat[chan][i]);
1647
1648                 tg3_writephy(tp, 0x16, 0x0202);
1649                 if (tg3_wait_macro_done(tp)) {
1650                         *resetp = 1;
1651                         return -EBUSY;
1652                 }
1653
1654                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1655                              (chan * 0x2000) | 0x0200);
1656                 tg3_writephy(tp, 0x16, 0x0082);
1657                 if (tg3_wait_macro_done(tp)) {
1658                         *resetp = 1;
1659                         return -EBUSY;
1660                 }
1661
1662                 tg3_writephy(tp, 0x16, 0x0802);
1663                 if (tg3_wait_macro_done(tp)) {
1664                         *resetp = 1;
1665                         return -EBUSY;
1666                 }
1667
1668                 for (i = 0; i < 6; i += 2) {
1669                         u32 low, high;
1670
1671                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1672                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1673                             tg3_wait_macro_done(tp)) {
1674                                 *resetp = 1;
1675                                 return -EBUSY;
1676                         }
1677                         low &= 0x7fff;
1678                         high &= 0x000f;
1679                         if (low != test_pat[chan][i] ||
1680                             high != test_pat[chan][i+1]) {
1681                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1682                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1683                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1684
1685                                 return -EBUSY;
1686                         }
1687                 }
1688         }
1689
1690         return 0;
1691 }
1692
1693 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1694 {
1695         int chan;
1696
1697         for (chan = 0; chan < 4; chan++) {
1698                 int i;
1699
1700                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1701                              (chan * 0x2000) | 0x0200);
1702                 tg3_writephy(tp, 0x16, 0x0002);
1703                 for (i = 0; i < 6; i++)
1704                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1705                 tg3_writephy(tp, 0x16, 0x0202);
1706                 if (tg3_wait_macro_done(tp))
1707                         return -EBUSY;
1708         }
1709
1710         return 0;
1711 }
1712
/* PHY reset path for 5703/5704/5705 chips: repeatedly reset the PHY
 * and write/verify a DSP test pattern until it sticks (up to 10
 * tries), then clear the pattern channels and restore the registers
 * modified along the way.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		/* Only re-issue the BMCR reset when the previous
		 * testpat attempt asked for one.
		 */
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if all retries fail, the testpat err is
	 * discarded here; the chanpat-reset result is returned
	 * instead.  Also, phy9_orig is restored below even on the
	 * (unlikely) path where every iteration bailed before the
	 * MII_TG3_CTRL read — confirm this matches intent.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access set up above. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master-mode register saved in the loop. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable the transmitter and interrupt disabled above. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1788
1789 /* This will reset the tigon3 PHY if there is no valid
1790  * link unless the FORCE argument is non-zero.
1791  */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	/* 5906: take the internal ethernet PHY out of IDDQ (power
	 * down) state before attempting any MII access.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR latches link-down events; read twice so phy_status
	 * reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report carrier-off now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 use the dedicated DSP-verify reset routine
	 * and skip the common path, resuming at "out" for the shared
	 * errata workarounds.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily clear the CPMU's 10MB-RX-only
	 * GPHY mode around the reset; restored below.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the CPMU mode saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784/5761 AX: don't leave the MAC clock at 12.5 MHz after
	 * the reset.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* Re-apply PHY trim/tuning values from OTP after the reset. */
	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP fixups for PHYs with the ADC erratum. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* 5704 A0 workaround: write issued twice on purpose. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1934
/* Drive the GPIO pins that control the NIC's auxiliary power.  On
 * 5704/5714 the peer port's WOL/ASF flags are consulted before
 * changing the GPIO state (NOTE(review): presumably because both
 * ports share the aux supply — confirm against hardware docs).
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only real NICs (not LOM parts) have this circuitry. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power available if either port needs WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Leave the GPIOs alone once the peer port has
			 * completed its init.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Raise GPIO0 in a separate step, then drop
			 * GPIO2 last (when usable).
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Neither port needs aux power: pulse GPIO1
			 * through a three-step sequence.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2045
2046 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2047 {
2048         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2049                 return 1;
2050         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2051                 if (speed != SPEED_10)
2052                         return 1;
2053         } else if (speed == SPEED_10)
2054                 return 1;
2055
2056         return 0;
2057 }
2058
2059 static int tg3_setup_phy(struct tg3 *, int);
2060
2061 #define RESET_KIND_SHUTDOWN     0
2062 #define RESET_KIND_INIT         1
2063 #define RESET_KIND_SUSPEND      2
2064
2065 static void tg3_write_sig_post_reset(struct tg3 *, int);
2066 static int tg3_halt_cpu(struct tg3 *, u32);
2067 static int tg3_nvram_lock(struct tg3 *);
2068 static void tg3_nvram_unlock(struct tg3 *);
2069
/* Put the PHY into a low-power state ahead of device power-down.
 * Serdes devices only get their SG_DIG/serdes config adjusted; for
 * copper PHYs the chip is put in IDDQ (5906), optionally given
 * extra low-power register setup when do_low_power is set, and
 * finally powered down via BMCR_PDOWN unless a known chip bug
 * forbids it.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then drop it into IDDQ state. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (do_low_power) {
		/* Force the LEDs off and select the PHY's low-power
		 * auxiliary power-control settings.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	/* 5784/5761 AX: force the MAC clock to 12.5 MHz before
	 * powering the PHY down.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2123
2124 /* tp->lock is held. */
2125 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2126 {
2127         u32 addr_high, addr_low;
2128         int i;
2129
2130         addr_high = ((tp->dev->dev_addr[0] << 8) |
2131                      tp->dev->dev_addr[1]);
2132         addr_low = ((tp->dev->dev_addr[2] << 24) |
2133                     (tp->dev->dev_addr[3] << 16) |
2134                     (tp->dev->dev_addr[4] <<  8) |
2135                     (tp->dev->dev_addr[5] <<  0));
2136         for (i = 0; i < 4; i++) {
2137                 if (i == 1 && skip_mac_1)
2138                         continue;
2139                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2140                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2141         }
2142
2143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2145                 for (i = 0; i < 12; i++) {
2146                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2147                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2148                 }
2149         }
2150
2151         addr_high = (tp->dev->dev_addr[0] +
2152                      tp->dev->dev_addr[1] +
2153                      tp->dev->dev_addr[2] +
2154                      tp->dev->dev_addr[3] +
2155                      tp->dev->dev_addr[4] +
2156                      tp->dev->dev_addr[5]) &
2157                 TX_BACKOFF_SEED_MASK;
2158         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2159 }
2160
2161 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2162 {
2163         u32 misc_host_ctrl;
2164         bool device_should_wake, do_low_power;
2165
2166         /* Make sure register accesses (indirect or otherwise)
2167          * will function correctly.
2168          */
2169         pci_write_config_dword(tp->pdev,
2170                                TG3PCI_MISC_HOST_CTRL,
2171                                tp->misc_host_ctrl);
2172
2173         switch (state) {
2174         case PCI_D0:
2175                 pci_enable_wake(tp->pdev, state, false);
2176                 pci_set_power_state(tp->pdev, PCI_D0);
2177
2178                 /* Switch out of Vaux if it is a NIC */
2179                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2180                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2181
2182                 return 0;
2183
2184         case PCI_D1:
2185         case PCI_D2:
2186         case PCI_D3hot:
2187                 break;
2188
2189         default:
2190                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2191                         tp->dev->name, state);
2192                 return -EINVAL;
2193         }
2194
2195         /* Restore the CLKREQ setting. */
2196         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2197                 u16 lnkctl;
2198
2199                 pci_read_config_word(tp->pdev,
2200                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2201                                      &lnkctl);
2202                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2203                 pci_write_config_word(tp->pdev,
2204                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2205                                       lnkctl);
2206         }
2207
2208         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2209         tw32(TG3PCI_MISC_HOST_CTRL,
2210              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2211
2212         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2213                              device_may_wakeup(&tp->pdev->dev) &&
2214                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2215
2216         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2217                 do_low_power = false;
2218                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2219                     !tp->link_config.phy_is_low_power) {
2220                         struct phy_device *phydev;
2221                         u32 phyid, advertising;
2222
2223                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2224
2225                         tp->link_config.phy_is_low_power = 1;
2226
2227                         tp->link_config.orig_speed = phydev->speed;
2228                         tp->link_config.orig_duplex = phydev->duplex;
2229                         tp->link_config.orig_autoneg = phydev->autoneg;
2230                         tp->link_config.orig_advertising = phydev->advertising;
2231
2232                         advertising = ADVERTISED_TP |
2233                                       ADVERTISED_Pause |
2234                                       ADVERTISED_Autoneg |
2235                                       ADVERTISED_10baseT_Half;
2236
2237                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2238                             device_should_wake) {
2239                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2240                                         advertising |=
2241                                                 ADVERTISED_100baseT_Half |
2242                                                 ADVERTISED_100baseT_Full |
2243                                                 ADVERTISED_10baseT_Full;
2244                                 else
2245                                         advertising |= ADVERTISED_10baseT_Full;
2246                         }
2247
2248                         phydev->advertising = advertising;
2249
2250                         phy_start_aneg(phydev);
2251
2252                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2253                         if (phyid != TG3_PHY_ID_BCMAC131) {
2254                                 phyid &= TG3_PHY_OUI_MASK;
2255                                 if (phyid == TG3_PHY_OUI_1 &&
2256                                     phyid == TG3_PHY_OUI_2 &&
2257                                     phyid == TG3_PHY_OUI_3)
2258                                         do_low_power = true;
2259                         }
2260                 }
2261         } else {
2262                 do_low_power = false;
2263
2264                 if (tp->link_config.phy_is_low_power == 0) {
2265                         tp->link_config.phy_is_low_power = 1;
2266                         tp->link_config.orig_speed = tp->link_config.speed;
2267                         tp->link_config.orig_duplex = tp->link_config.duplex;
2268                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2269                 }
2270
2271                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2272                         tp->link_config.speed = SPEED_10;
2273                         tp->link_config.duplex = DUPLEX_HALF;
2274                         tp->link_config.autoneg = AUTONEG_ENABLE;
2275                         tg3_setup_phy(tp, 0);
2276                 }
2277         }
2278
2279         __tg3_set_mac_addr(tp, 0);
2280
2281         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2282                 u32 val;
2283
2284                 val = tr32(GRC_VCPU_EXT_CTRL);
2285                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2286         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2287                 int i;
2288                 u32 val;
2289
2290                 for (i = 0; i < 200; i++) {
2291                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2292                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2293                                 break;
2294                         msleep(1);
2295                 }
2296         }
2297         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2298                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2299                                                      WOL_DRV_STATE_SHUTDOWN |
2300                                                      WOL_DRV_WOL |
2301                                                      WOL_SET_MAGIC_PKT);
2302
2303         if (device_should_wake) {
2304                 u32 mac_mode;
2305
2306                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2307                         if (do_low_power) {
2308                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2309                                 udelay(40);
2310                         }
2311
2312                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2313                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2314                         else
2315                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2316
2317                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2318                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2319                             ASIC_REV_5700) {
2320                                 u32 speed = (tp->tg3_flags &
2321                                              TG3_FLAG_WOL_SPEED_100MB) ?
2322                                              SPEED_100 : SPEED_10;
2323                                 if (tg3_5700_link_polarity(tp, speed))
2324                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2325                                 else
2326                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2327                         }
2328                 } else {
2329                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2330                 }
2331
2332                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2333                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2334
2335                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2336                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2337                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2338                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2339                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2340                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2341
2342                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2343                         mac_mode |= tp->mac_mode &
2344                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2345                         if (mac_mode & MAC_MODE_APE_TX_EN)
2346                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2347                 }
2348
2349                 tw32_f(MAC_MODE, mac_mode);
2350                 udelay(100);
2351
2352                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2353                 udelay(10);
2354         }
2355
2356         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2357             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2359                 u32 base_val;
2360
2361                 base_val = tp->pci_clock_ctrl;
2362                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2363                              CLOCK_CTRL_TXCLK_DISABLE);
2364
2365                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2366                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2367         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2368                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2369                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2370                 /* do nothing */
2371         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2372                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2373                 u32 newbits1, newbits2;
2374
2375                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2376                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2377                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2378                                     CLOCK_CTRL_TXCLK_DISABLE |
2379                                     CLOCK_CTRL_ALTCLK);
2380                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2381                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2382                         newbits1 = CLOCK_CTRL_625_CORE;
2383                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2384                 } else {
2385                         newbits1 = CLOCK_CTRL_ALTCLK;
2386                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2387                 }
2388
2389                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2390                             40);
2391
2392                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2393                             40);
2394
2395                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2396                         u32 newbits3;
2397
2398                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2399                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2400                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2401                                             CLOCK_CTRL_TXCLK_DISABLE |
2402                                             CLOCK_CTRL_44MHZ_CORE);
2403                         } else {
2404                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2405                         }
2406
2407                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2408                                     tp->pci_clock_ctrl | newbits3, 40);
2409                 }
2410         }
2411
2412         if (!(device_should_wake) &&
2413             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2414                 tg3_power_down_phy(tp, do_low_power);
2415
2416         tg3_frob_aux_power(tp);
2417
2418         /* Workaround for unstable PLL clock */
2419         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2420             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2421                 u32 val = tr32(0x7d00);
2422
2423                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2424                 tw32(0x7d00, val);
2425                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2426                         int err;
2427
2428                         err = tg3_nvram_lock(tp);
2429                         tg3_halt_cpu(tp, RX_CPU_BASE);
2430                         if (!err)
2431                                 tg3_nvram_unlock(tp);
2432                 }
2433         }
2434
2435         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2436
2437         if (device_should_wake)
2438                 pci_enable_wake(tp->pdev, state, true);
2439
2440         /* Finally, set the new power state. */
2441         pci_set_power_state(tp->pdev, state);
2442
2443         return 0;
2444 }
2445
2446 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2447 {
2448         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2449         case MII_TG3_AUX_STAT_10HALF:
2450                 *speed = SPEED_10;
2451                 *duplex = DUPLEX_HALF;
2452                 break;
2453
2454         case MII_TG3_AUX_STAT_10FULL:
2455                 *speed = SPEED_10;
2456                 *duplex = DUPLEX_FULL;
2457                 break;
2458
2459         case MII_TG3_AUX_STAT_100HALF:
2460                 *speed = SPEED_100;
2461                 *duplex = DUPLEX_HALF;
2462                 break;
2463
2464         case MII_TG3_AUX_STAT_100FULL:
2465                 *speed = SPEED_100;
2466                 *duplex = DUPLEX_FULL;
2467                 break;
2468
2469         case MII_TG3_AUX_STAT_1000HALF:
2470                 *speed = SPEED_1000;
2471                 *duplex = DUPLEX_HALF;
2472                 break;
2473
2474         case MII_TG3_AUX_STAT_1000FULL:
2475                 *speed = SPEED_1000;
2476                 *duplex = DUPLEX_FULL;
2477                 break;
2478
2479         default:
2480                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2481                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2482                                  SPEED_10;
2483                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2484                                   DUPLEX_HALF;
2485                         break;
2486                 }
2487                 *speed = SPEED_INVALID;
2488                 *duplex = DUPLEX_INVALID;
2489                 break;
2490         }
2491 }
2492
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config, then either restart auto-negotiation or force the
 * requested speed/duplex.  Register write order is significant; do not
 * reorder the PHY accesses below.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* Plain autonegotiation: advertise every mode allowed
		 * by tp->link_config.advertising, plus flow control.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 additionally request master mode at
			 * gigabit (chip-specific workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement when forcing 10/100. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY into loopback to drop the current
			 * link, wait for link-down, then apply the new
			 * forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched; read twice
				 * to get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2630
2631 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2632 {
2633         int err;
2634
2635         /* Turn off tap power management. */
2636         /* Set Extended packet length bit */
2637         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2638
2639         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2640         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2641
2642         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2643         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2644
2645         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2646         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2647
2648         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2649         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2650
2651         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2652         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2653
2654         udelay(40);
2655
2656         return err;
2657 }
2658
2659 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2660 {
2661         u32 adv_reg, all_mask = 0;
2662
2663         if (mask & ADVERTISED_10baseT_Half)
2664                 all_mask |= ADVERTISE_10HALF;
2665         if (mask & ADVERTISED_10baseT_Full)
2666                 all_mask |= ADVERTISE_10FULL;
2667         if (mask & ADVERTISED_100baseT_Half)
2668                 all_mask |= ADVERTISE_100HALF;
2669         if (mask & ADVERTISED_100baseT_Full)
2670                 all_mask |= ADVERTISE_100FULL;
2671
2672         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2673                 return 0;
2674
2675         if ((adv_reg & all_mask) != all_mask)
2676                 return 0;
2677         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2678                 u32 tg3_ctrl;
2679
2680                 all_mask = 0;
2681                 if (mask & ADVERTISED_1000baseT_Half)
2682                         all_mask |= ADVERTISE_1000HALF;
2683                 if (mask & ADVERTISED_1000baseT_Full)
2684                         all_mask |= ADVERTISE_1000FULL;
2685
2686                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2687                         return 0;
2688
2689                 if ((tg3_ctrl & all_mask) != all_mask)
2690                         return 0;
2691         }
2692         return 1;
2693 }
2694
2695 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2696 {
2697         u32 curadv, reqadv;
2698
2699         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2700                 return 1;
2701
2702         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2703         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2704
2705         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2706                 if (curadv != reqadv)
2707                         return 0;
2708
2709                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2710                         tg3_readphy(tp, MII_LPA, rmtadv);
2711         } else {
2712                 /* Reprogram the advertisement register, even if it
2713                  * does not affect the current link.  If the link
2714                  * gets renegotiated in the future, we can save an
2715                  * additional renegotiation cycle by advertising
2716                  * it correctly in the first place.
2717                  */
2718                 if (curadv != reqadv) {
2719                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2720                                      ADVERTISE_PAUSE_ASYM);
2721                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2722                 }
2723         }
2724
2725         return 1;
2726 }
2727
/* Evaluate and (re)establish the link on a copper PHY.  Applies
 * chip-specific PHY workarounds, polls link/aux status, decides whether
 * the current link satisfies the configuration, reprograms the PHY via
 * tg3_phy_copper_begin() if not, and finally brings the MAC mode and
 * netdev carrier state in line with the result.  Returns 0 on success
 * or a negative error from 5401 DSP setup / PHY reset.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear latched MAC status bits before probing the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we drive the MDIO bus directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* Double read: BMSR link status is latched-low. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP patch and
			 * give the link a short while to come up.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit: retry once via full PHY
			 * reset + DSP reload if link did not come up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only link-change interrupts when using MI interrupts;
	 * otherwise mask everything (except on 5906).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the aux control shadow is set; if it
		 * was not, set it and go straight to re-linking.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link (double BMSR read for the latched status). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status word, then decode it
		 * into current_speed/current_duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to read back as a plausible value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link counts only if the PHY is actually
			 * advertising everything we asked for, including
			 * flow control.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: require an exact match of the
			 * requested speed/duplex/flow control.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY (advertisement, autoneg restart or
		 * forced mode), then see if link came right back.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Match the MAC port mode and duplex to the link result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 gigabit on PCI-X / high-speed PCI: notify firmware via
	 * the mailbox after clearing status (chip-specific sequence).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		/* Affected chips must not assert CLKREQ at 10/100. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Propagate the final link state to the net device. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3004
/* Bookkeeping for the software auto-negotiation state machine used on
 * fiber links; driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current state; one of the ANEG_STATE_* values */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and link-partner status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks; cur_time is incremented on
	 * every invocation of the state machine.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has been observed unchanged (>1 sets ability_match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean match flags derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* tx/rx config words; ANEG_CFG_* bits */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle-time threshold for the fiber autoneg state machine.
 * NOTE(review): appears to be in state-machine ticks (cur_time units);
 * confirm against the comparisons in the state machine below.
 */
#define ANEG_STATE_SETTLE_TIME  10000
3068
/* Software implementation of the 1000BASE-X (IEEE 802.3z clause 37)
 * auto-negotiation state machine, used for fiber devices that lack
 * (or do not use) hardware autoneg.  fiber_autoneg() calls this once
 * per ~1us tick; each call samples the received config word from the
 * MAC and advances ap->state by at most one transition.
 *
 * @tp: driver private state (register access, link config)
 * @ap: persistent state machine context for this negotiation run
 *
 * Returns ANEG_OK (keep ticking), ANEG_TIMER_ENAB (keep ticking, a
 * settle timer is running), ANEG_DONE (finished, link result is in
 * ap->flags) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick of a fresh run: clear all sampled-input state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the link partner's config code word, if one is being
	 * received.  ability_match is only asserted once the same word
	 * has been seen at least twice in a row (debounce).
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Word changed: restart the match counter. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word on the wire: partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restart negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			/* Autoneg disabled: report done immediately. */
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word (break_link) and start
		 * the settle timer.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time has elapsed. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Build and transmit our advertised ability word:
		 * full duplex plus the pause bits derived from the
		 * configured flow-control policy.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero ability word from the
		 * partner before acknowledging.
		 */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acknowledged; its word (ignoring ACK)
			 * must still match what we debounced earlier,
			 * otherwise restart negotiation.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner dropped back to break_link: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): mask 0x0008 is presumably the partner's
		 * Toggle bit in the received code word — confirm against
		 * 802.3z clause 37 / tg3.h bit layout.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner restarted: follow it. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * succeed only if neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and let the link settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3322
3323 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3324 {
3325         int res = 0;
3326         struct tg3_fiber_aneginfo aninfo;
3327         int status = ANEG_FAILED;
3328         unsigned int tick;
3329         u32 tmp;
3330
3331         tw32_f(MAC_TX_AUTO_NEG, 0);
3332
3333         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3334         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3335         udelay(40);
3336
3337         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3338         udelay(40);
3339
3340         memset(&aninfo, 0, sizeof(aninfo));
3341         aninfo.flags |= MR_AN_ENABLE;
3342         aninfo.state = ANEG_STATE_UNKNOWN;
3343         aninfo.cur_time = 0;
3344         tick = 0;
3345         while (++tick < 195000) {
3346                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3347                 if (status == ANEG_DONE || status == ANEG_FAILED)
3348                         break;
3349
3350                 udelay(1);
3351         }
3352
3353         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3354         tw32_f(MAC_MODE, tp->mac_mode);
3355         udelay(40);
3356
3357         *txflags = aninfo.txconfig;
3358         *rxflags = aninfo.flags;
3359
3360         if (status == ANEG_DONE &&
3361             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3362                              MR_LP_ADV_FULL_DUPLEX)))
3363                 res = 1;
3364
3365         return res;
3366 }
3367
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 *
 * The register numbers and values below are undocumented vendor magic;
 * the inline comments reflect the original driver's annotations.  The
 * write ordering and delays are part of the hardware bring-up recipe —
 * do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	/* NOTE(review): reg 0x11 toggled 0x0a50 -> 0x0a10, presumably
	 * pulsing a power/lock control bit — no public documentation.
	 */
	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3417
/* Configure a fiber link using the SERDES hardware autoneg engine
 * (SG_DIG block), used on chips that have it (e.g. 5704S family).
 *
 * @tp:         driver private state
 * @mac_status: snapshot of MAC_STATUS taken by the caller
 *
 * Handles three cases: forced (non-autoneg) mode, (re)starting
 * hardware autoneg when the desired SG_DIG_CTRL differs from what is
 * programmed, and harvesting a completed negotiation (including a
 * parallel-detection fallback when the partner never sends config
 * words).  Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs except 5704 A0/A1 need the SERDES_CFG workaround
	 * writes around autoneg restarts.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down hardware autoneg if it was on,
		 * and declare link up on PCS sync alone (no pause).
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold the configured flow-control policy into the advertised
	 * pause capability bits.
	 */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we are currently up via parallel detection and the
		 * autoneg-restart timer has not expired, keep the link as
		 * long as PCS is synced and no config words are arriving.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset while programming the new control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Negotiation done: derive flow control from what
			 * we advertised and what the partner reported.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: try parallel detection
				 * by dropping out of hardware autoneg.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3559
3560 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3561 {
3562         int current_link_up = 0;
3563
3564         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3565                 goto out;
3566
3567         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3568                 u32 txflags, rxflags;
3569                 int i;
3570
3571                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3572                         u32 local_adv = 0, remote_adv = 0;
3573
3574                         if (txflags & ANEG_CFG_PS1)
3575                                 local_adv |= ADVERTISE_1000XPAUSE;
3576                         if (txflags & ANEG_CFG_PS2)
3577                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3578
3579                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3580                                 remote_adv |= LPA_1000XPAUSE;
3581                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3582                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3583
3584                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3585
3586                         current_link_up = 1;
3587                 }
3588                 for (i = 0; i < 30; i++) {
3589                         udelay(20);
3590                         tw32_f(MAC_STATUS,
3591                                (MAC_STATUS_SYNC_CHANGED |
3592                                 MAC_STATUS_CFG_CHANGED));
3593                         udelay(40);
3594                         if ((tr32(MAC_STATUS) &
3595                              (MAC_STATUS_SYNC_CHANGED |
3596                               MAC_STATUS_CFG_CHANGED)) == 0)
3597                                 break;
3598                 }
3599
3600                 mac_status = tr32(MAC_STATUS);
3601                 if (current_link_up == 0 &&
3602                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3603                     !(mac_status & MAC_STATUS_RCVD_CFG))
3604                         current_link_up = 1;
3605         } else {
3606                 tg3_setup_flow_control(tp, 0, 0);
3607
3608                 /* Forcing 1000FD link up. */
3609                 current_link_up = 1;
3610
3611                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3612                 udelay(40);
3613
3614                 tw32_f(MAC_MODE, tp->mac_mode);
3615                 udelay(40);
3616         }
3617
3618 out:
3619         return current_link_up;
3620 }
3621
/* Top-level link setup for TBI/fiber ports.
 *
 * Snapshots the current link parameters, dispatches to either the
 * hardware (SG_DIG) or by-hand autoneg path, then acknowledges status
 * change latches, programs the link LEDs, updates netif carrier state
 * and reports any link change.
 *
 * @tp:          driver private state
 * @force_reset: unused on this path (kept for signature parity with
 *               the copper setup routine)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can report only
	 * genuine changes at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg link already up and healthy
	 * (synced, signal present, no pending changes, no config words)
	 * — just ack the latches and leave everything alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the status change latches until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Sync lost with autoneg idle: pulse the config-word
		 * sender to provoke the partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the LEDs accordingly. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a change in carrier, or in any link parameter. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3729
3730 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3731 {
3732         int current_link_up, err = 0;
3733         u32 bmsr, bmcr;
3734         u16 current_speed;
3735         u8 current_duplex;
3736         u32 local_adv, remote_adv;
3737
3738         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3739         tw32_f(MAC_MODE, tp->mac_mode);
3740         udelay(40);
3741
3742         tw32(MAC_EVENT, 0);
3743
3744         tw32_f(MAC_STATUS,
3745              (MAC_STATUS_SYNC_CHANGED |
3746               MAC_STATUS_CFG_CHANGED |
3747               MAC_STATUS_MI_COMPLETION |
3748               MAC_STATUS_LNKSTATE_CHANGED));
3749         udelay(40);
3750
3751         if (force_reset)
3752                 tg3_phy_reset(tp);
3753
3754         current_link_up = 0;
3755         current_speed = SPEED_INVALID;
3756         current_duplex = DUPLEX_INVALID;
3757
3758         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3759         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3760         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3761                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3762                         bmsr |= BMSR_LSTATUS;
3763                 else
3764                         bmsr &= ~BMSR_LSTATUS;
3765         }
3766
3767         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3768
3769         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3770             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3771                 /* do nothing, just check for link up at the end */
3772         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3773                 u32 adv, new_adv;
3774
3775                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3776                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3777                                   ADVERTISE_1000XPAUSE |
3778                                   ADVERTISE_1000XPSE_ASYM |
3779                                   ADVERTISE_SLCT);
3780
3781                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3782
3783                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3784                         new_adv |= ADVERTISE_1000XHALF;
3785                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3786                         new_adv |= ADVERTISE_1000XFULL;
3787
3788                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3789                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3790                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3791                         tg3_writephy(tp, MII_BMCR, bmcr);
3792
3793                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3794                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3795                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3796
3797                         return err;
3798                 }
3799         } else {
3800                 u32 new_bmcr;
3801
3802                 bmcr &= ~BMCR_SPEED1000;
3803                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3804
3805                 if (tp->link_config.duplex == DUPLEX_FULL)
3806                         new_bmcr |= BMCR_FULLDPLX;
3807
3808                 if (new_bmcr != bmcr) {
3809                         /* BMCR_SPEED1000 is a reserved bit that needs
3810                          * to be set on write.
3811                          */
3812                         new_bmcr |= BMCR_SPEED1000;
3813
3814                         /* Force a linkdown */
3815                         if (netif_carrier_ok(tp->dev)) {
3816                                 u32 adv;
3817
3818                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3819                                 adv &= ~(ADVERTISE_1000XFULL |
3820                                          ADVERTISE_1000XHALF |
3821                                          ADVERTISE_SLCT);
3822                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3823                                 tg3_writephy(tp, MII_BMCR, bmcr |
3824                                                            BMCR_ANRESTART |
3825                                                            BMCR_ANENABLE);
3826                                 udelay(10);
3827                                 netif_carrier_off(tp->dev);
3828                         }
3829                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3830                         bmcr = new_bmcr;
3831                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3832                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3833                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3834                             ASIC_REV_5714) {
3835                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3836                                         bmsr |= BMSR_LSTATUS;
3837                                 else
3838                                         bmsr &= ~BMSR_LSTATUS;
3839                         }
3840                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3841                 }
3842         }
3843
3844         if (bmsr & BMSR_LSTATUS) {
3845                 current_speed = SPEED_1000;
3846                 current_link_up = 1;
3847                 if (bmcr & BMCR_FULLDPLX)
3848                         current_duplex = DUPLEX_FULL;
3849                 else
3850                         current_duplex = DUPLEX_HALF;
3851
3852                 local_adv = 0;
3853                 remote_adv = 0;
3854
3855                 if (bmcr & BMCR_ANENABLE) {
3856                         u32 common;
3857
3858                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3859                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3860                         common = local_adv & remote_adv;
3861                         if (common & (ADVERTISE_1000XHALF |
3862                                       ADVERTISE_1000XFULL)) {
3863                                 if (common & ADVERTISE_1000XFULL)
3864                                         current_duplex = DUPLEX_FULL;
3865                                 else
3866                                         current_duplex = DUPLEX_HALF;
3867                         }
3868                         else
3869                                 current_link_up = 0;
3870                 }
3871         }
3872
3873         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3874                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3875
3876         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3877         if (tp->link_config.active_duplex == DUPLEX_HALF)
3878                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3879
3880         tw32_f(MAC_MODE, tp->mac_mode);
3881         udelay(40);
3882
3883         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3884
3885         tp->link_config.active_speed = current_speed;
3886         tp->link_config.active_duplex = current_duplex;
3887
3888         if (current_link_up != netif_carrier_ok(tp->dev)) {
3889                 if (current_link_up)
3890                         netif_carrier_on(tp->dev);
3891                 else {
3892                         netif_carrier_off(tp->dev);
3893                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3894                 }
3895                 tg3_link_report(tp);
3896         }
3897         return err;
3898 }
3899
/* Work around serdes autoneg problems: once the autoneg grace period
 * (tp->serdes_counter) expires with no link, force a 1000/full link if
 * the partner is seen by "parallel detection" (signal present but no
 * config code words).  Conversely, re-enable autoneg when config code
 * words start arriving on a parallel-detected link.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			/* Read twice; presumably the first read clears
			 * latched status -- TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3957
/* (Re)configure the link for whatever PHY flavour this board uses and
 * then update MAC-side settings that depend on the resulting link
 * state.  Returns 0 or a negative error from the PHY setup helper.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	/* Dispatch on PHY type: SERDES fiber, MII-attached serdes, or copper. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		/* Re-scale the GRC prescaler to match the current MAC
		 * core clock reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half uses a larger slot time (0xff) than all other
	 * speed/duplex combinations (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only coalesce statistics-block DMA while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: use the tuned L1 entry threshold.
		 * Link up: set the threshold field to all-ones.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4019
4020 /* This is called whenever we suspect that the system chipset is re-
4021  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4022  * is bogus tx completions. We try to recover by setting the
4023  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4024  * in the workqueue.
4025  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Should never get here if the reorder workaround is already
	 * engaged or if indirect mailbox writes are in use.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the condition; the actual chip reset happens later in
	 * the workqueue (see the comment above this function).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
4040
4041 static inline u32 tg3_tx_avail(struct tg3 *tp)
4042 {
4043         smp_mb();
4044         return (tp->tx_pending -
4045                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4046 }
4047
4048 /* Tigon3 never reports partial packet sends.  So we do not
4049  * need special logic to handle SKBs that have not had all
4050  * of their frags sent yet, like SunGEM does.
4051  */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every entry between our consumer index and the
	 * hardware's consumer index: unmap, free the skb, clear slot.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A "completed" slot with no skb indicates a bogus
		 * completion, likely from chipset MMIO reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Fragment slots carry no skb of their own and must all
		 * fall inside the completed range; otherwise flag a bug.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check availability under the tx lock so we do not race
	 * with tg3_start_xmit() stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
4106
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
                            int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve the destination descriptor, buffer-info slot, and
	 * buffer size for the requested producer ring; src_idx >= 0
	 * names a slot being recycled whose skb pointer we must clear.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the DMA mapping result is not checked for
	 * mapping failure here.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	/* The recycled slot no longer owns an skb. */
	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words are written; all other descriptor
	 * fields are invariant (see the comment above this function).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4178
4179 /* We only need to move over in the address because the other
4180  * members of the RX descriptor are invariant.  See notes above
4181  * tg3_alloc_rx_skb for full details.
4182  */
4183 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4184                            int src_idx, u32 dest_idx_unmasked)
4185 {
4186         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4187         struct ring_info *src_map, *dest_map;
4188         int dest_idx;
4189
4190         switch (opaque_key) {
4191         case RXD_OPAQUE_RING_STD:
4192                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4193                 dest_desc = &tp->rx_std[dest_idx];
4194                 dest_map = &tp->rx_std_buffers[dest_idx];
4195                 src_desc = &tp->rx_std[src_idx];
4196                 src_map = &tp->rx_std_buffers[src_idx];
4197                 break;
4198
4199         case RXD_OPAQUE_RING_JUMBO:
4200                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4201                 dest_desc = &tp->rx_jumbo[dest_idx];
4202                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4203                 src_desc = &tp->rx_jumbo[src_idx];
4204                 src_map = &tp->rx_jumbo_buffers[src_idx];
4205                 break;
4206
4207         default:
4208                 return;
4209         }
4210
4211         dest_map->skb = src_map->skb;
4212         pci_unmap_addr_set(dest_map, mapping,
4213                            pci_unmap_addr(src_map, mapping));
4214         dest_desc->addr_hi = src_desc->addr_hi;
4215         dest_desc->addr_lo = src_desc->addr_lo;
4216
4217         src_map->skb = NULL;
4218 }
4219
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack via the VLAN
 * acceleration path.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4226
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound odd, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* Drain the RX return ring, bounded by the NAPI budget.  Returns the
 * number of packets handed to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie names the producer ring (and slot
		 * within it) this buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring cookie: skip this status entry. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames, except the odd-nibble MII
		 * indication which is tolerated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Large packets: hand the existing buffer to the stack
		 * and post a freshly allocated replacement.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == NET_IP_ALIGN
			/* rx_offset will likely not equal NET_IP_ALIGN
			 * if this is a 5701 card running in PCI-X mode
			 * [see tg3_get_invariants()]
			 */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			/* Small packets: copy into a fresh skb and
			 * recycle the original DMA buffer in place.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev,
						    len + TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flagged
		 * a TCP/UDP frame and the csum field reads 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted standard
		 * ring buffers so it does not run dry mid-burst; the
		 * final refill below is then skipped for this ring.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ensure the mailbox writes are posted before returning. */
	mmiowb();

	return received;
}
4409
/* One round of link-event, TX-completion and RX work.  Returns the
 * updated work_done count (RX packets processed so far), never
 * exceeding budget.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns link management; just ack
				 * the MAC status change bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* Bail out if tg3_tx() detected bogus completions; the
		 * scheduled reset task takes over from here.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4451
/* NAPI poll callback: repeat tg3_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and re-enable
 * interrupts via tg3_restart_ints().
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4492
/* Shut off interrupt processing (handlers check tg3_irq_sync()) and
 * wait for any handler already in flight to finish.  Called with
 * tp->lock held via tg3_full_lock().
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Must not already be quiesced. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on running handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4502
/* Nonzero while tg3_irq_quiesce() has interrupt processing shut off. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4507
4508 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4509  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4510  * with as well.  Most of the time, this is not necessary except when
4511  * shutting down the device.
4512  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Optionally shut out the IRQ handler too (see comment above). */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4519
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4524
4525 /* One-shot MSI handler - Chip automatically disables interrupt
4526  * after sending MSI so driver doesn't have to do it.
4527  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RX entry for the poll loop. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Do not schedule NAPI while the device is being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4541
4542 /* MSI ISR - No need to check for interrupt sharing and no need to
4543  * flush status block and interrupt mailbox. PCI ordering rules
4544  * guarantee that MSI will arrive after the status block.
4545  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RX entry for the poll loop. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Do not schedule NAPI while the device is being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4566
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4615
/* Interrupt handler used when the chip runs with tagged status blocks.
 * New events are detected by comparing the status block's tag against
 * tp->last_tag rather than testing SD_STATUS_UPDATED, which allows the
 * hardware to coalesce interrupts more aggressively.
 *
 * Returns IRQ_HANDLED unless the interrupt clearly belongs to another
 * device sharing the line (shared INTx).
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
4663
4664 /* ISR for interrupt test */
4665 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4666 {
4667         struct net_device *dev = dev_id;
4668         struct tg3 *tp = netdev_priv(dev);
4669         struct tg3_hw_status *sblk = tp->hw_status;
4670
4671         if ((sblk->status & SD_STATUS_UPDATED) ||
4672             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4673                 tg3_disable_ints(tp);
4674                 return IRQ_RETVAL(1);
4675         }
4676         return IRQ_RETVAL(0);
4677 }
4678
4679 static int tg3_init_hw(struct tg3 *, int);
4680 static int tg3_halt(struct tg3 *, int, int);
4681
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed.  dev_close() and
 * del_timer_sync() can sleep, so tp->lock must be dropped around the
 * error cleanup and re-taken before returning — hence the sparse
 * __releases/__acquires annotations below.
 *
 * Returns 0 on success or the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                /* Bring the chip down cleanly, then tear down the
                 * software state (timer, NAPI, netdev) outside the lock.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
4705
4706 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (e.g. netconsole): invoke the interrupt handler
 * directly since the normal IRQ path may be unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
4713 #endif
4714
/* Workqueue handler that fully resets the chip, scheduled from
 * tg3_tx_timeout() and other error paths.  Runs in process context so
 * it may sleep; all hardware access happens under tg3_full_lock().
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
        unsigned int restart_timer;

        /* Bail out if the interface was brought down before the work
         * item got to run.
         */
        tg3_full_lock(tp, 0);

        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* Quiesce PHY and the TX/NAPI paths before resetting; these
         * may sleep, so they run outside the lock.
         */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        /* Consume the one-shot "restart timer after reset" request. */
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        /* TX recovery: fall back to the flushing mailbox writers and
         * force mailbox write reordering protection from here on.
         */
        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, 1);
        if (err)
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);
}
4762
/* Dump a short MAC/DMA status snapshot to the log; used as a debugging
 * aid when a transmit timeout is detected.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
        printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
        printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
               tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4770
/* net_device tx_timeout hook: log the event (if TX error messages are
 * enabled) and defer the actual chip reset to tg3_reset_task(), since
 * the reset must run in process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
                       dev->name);
                tg3_dump_short_state(tp);
        }

        schedule_work(&tp->reset_task);
}
4783
4784 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4785 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4786 {
4787         u32 base = (u32) mapping & 0xffffffff;
4788
4789         return ((base > 0xffffdcc0) &&
4790                 (base + len + 8 < base));
4791 }
4792
4793 /* Test for DMA addresses > 40-bit */
4794 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4795                                           int len)
4796 {
4797 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4798         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4799                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4800         return 0;
4801 #else
4802         return 0;
4803 #endif
4804 }
4805
4806 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4807
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * The original skb has already been placed on the TX ring (entries
 * *start .. last_plus_one-1) but hit one of the DMA address errata.
 * Linearize it into a freshly allocated skb, map that instead, and
 * rewrite the ring entry at *start.  On any failure the packet is
 * silently dropped (ret = -1) and the ring entries are cleared.
 *
 * Returns 0 on success, -1 on allocation/mapping failure or when even
 * the new buffer crosses a 4GB boundary.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701: give the copy extra headroom so skb->data can
                 * be 4-byte aligned.
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
                new_addr = skb_shinfo(new_skb)->dma_maps[0];

                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        if (!ret)
                                skb_dma_unmap(&tp->pdev->dev, new_skb,
                                              DMA_TO_DEVICE);
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries.  The first entry owns the
         * replacement skb (or NULL on failure); the rest are cleared.
         */
        i = 0;
        while (entry != last_plus_one) {
                if (i == 0) {
                        tp->tx_buffers[entry].skb = new_skb;
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        /* The original skb's mappings are no longer referenced. */
        skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
        dev_kfree_skb(skb);

        return ret;
}
4870
/* Fill one hardware TX descriptor.
 *
 * @mss_and_is_end packs two values: bit 0 set marks the final fragment
 * of the packet (TXD_FLAG_END), and the remaining bits (>> 1) carry
 * the TSO MSS.  When @flags has TXD_FLAG_VLAN set, its high 16 bits
 * hold the VLAN tag and are moved into the descriptor's vlan_tag word.
 */
static void tg3_set_txd(struct tg3 *tp, int entry,
                        dma_addr_t mapping, int len, u32 flags,
                        u32 mss_and_is_end)
{
        struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
        int is_end = (mss_and_is_end & 0x1);
        u32 mss = (mss_and_is_end >> 1);
        u32 vlan_tag = 0;

        if (is_end)
                flags |= TXD_FLAG_END;
        if (flags & TXD_FLAG_VLAN) {
                vlan_tag = flags >> 16;
                flags &= 0xffff;
        }
        vlan_tag |= (mss << TXD_MSS_SHIFT);

        /* DMA address is split across two 32-bit descriptor words. */
        txd->addr_hi = ((u64) mapping >> 32);
        txd->addr_lo = ((u64) mapping & 0xffffffff);
        txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
        txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
4893
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Fast-path transmit: no DMA errata checks are needed here.  Builds
 * TSO/checksum/VLAN flags, DMA-maps the skb, fills one descriptor per
 * fragment, then kicks the producer mailbox.
 *
 * Returns NETDEV_TX_OK (the skb is consumed, even on mapping failure)
 * or NETDEV_TX_BUSY when the ring unexpectedly has no room.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* The headers must be writable for TSO fixups below. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                /* Hardware TSO: encode the header length into the
                 * upper bits of the mss field the chip consumes.
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* The chip recomputes these; prime tot_len with
                         * the per-segment size.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Map head + all fragments in one call; drop on failure. */
        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        tp->tx_buffers[entry].skb = skb;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];
                        tp->tx_buffers[entry].skb = NULL;

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping: reclaim may have freed room
                 * in the meantime.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
5013
5014 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5015
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments the skb and feeds each resulting packet back
 * through tg3_start_xmit_dma_bug().  The original skb is always freed;
 * returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring cannot possibly
 * hold the worst-case number of fragments.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Segment in software with TSO masked out of the features. */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        /* Transmit each segment individually. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
5048
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but additionally checks each DMA mapping for
 * the 4GB-boundary and >40-bit address errata; if any fragment would
 * hit a bug, the whole packet is re-queued through
 * tigon3_dma_hwbug_workaround().  Oversized TSO headers (> 80 bytes
 * with TG3_FLG2_TSO_BUG) are diverted to tg3_tso_bug().
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers must be writable for the fixups below. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Headers over 80 bytes trip a TSO erratum on some
                 * chips; fall back to software GSO for those packets.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the checksum itself and
                         * must not also see the TCPUDP_CSUM flag.
                         */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants the pseudo-header checksum
                         * pre-seeded.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option lengths where each chip family
                 * expects them (mss high bits vs. base_flags).
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Map head + all fragments in one call; drop on failure. */
        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        tp->tx_buffers[entry].skb = skb;

        would_hit_hwbug = 0;

        if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
        else if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];

                        tp->tx_buffers[entry].skb = NULL;

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor used by this skb. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping: reclaim may have freed room
                 * in the meantime.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
5226
5227 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5228                                int new_mtu)
5229 {
5230         dev->mtu = new_mtu;
5231
5232         if (new_mtu > ETH_DATA_LEN) {
5233                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5234                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5235                         ethtool_op_set_tso(dev, 0);
5236                 }
5237                 else
5238                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5239         } else {
5240                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5241                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5242                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5243         }
5244 }
5245
/* net_device change_mtu hook.  If the interface is down, just record
 * the new MTU; otherwise the chip must be halted and re-initialized
 * since the RX buffer sizing and jumbo ring setup depend on it.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the
 * tg3_restart_hw() error code.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Quiesce PHY and the data path before reconfiguring. */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}
5284
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: only the first descriptor of each packet carries
         * the skb pointer; skip over the fragment descriptors.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                txp->skb = NULL;

                i += skb_shinfo(skb)->nr_frags + 1;

                dev_kfree_skb_any(skb);
        }
}
5344
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when not even a single RX buffer
 * could be allocated; partial allocation shrinks tp->rx_pending /
 * tp->rx_jumbo_pending instead of failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips use jumbo-sized buffers in the standard
         * ring rather than a separate jumbo ring.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
5434
5435 /*
5436  * Must not be invoked with interrupt sources disabled and
5437  * the hardware shutdown down.
5438  */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* The three software ring-info arrays were carved out of one
	 * kzalloc() block in tg3_alloc_consistent(); rx_std_buffers is
	 * that block's base pointer, so this single kfree() also
	 * releases rx_jumbo_buffers and tx_buffers.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	/* Each DMA-coherent area is freed only if it was allocated and
	 * the pointer is cleared afterwards, so this function is safe
	 * to call on a partially-completed tg3_alloc_consistent().
	 */
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
5474
5475 /*
5476  * Must not be invoked with interrupt sources disabled and
5477  * the hardware shutdown down.  Can sleep.
5478  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation backs all three software ring-info
	 * arrays, laid out back to back as [std RX | jumbo RX | TX].
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	/* DMA-coherent areas for the descriptor rings, the status
	 * block and the hardware statistics block follow.
	 */
	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	/* tg3_free_consistent() checks each pointer before freeing,
	 * so it copes with whatever subset was allocated above.
	 */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
5536
#define MAX_WAIT_CNT 1000	/* 100us polls: up to 100ms wait in the stop/abort loops below */
5538
5539 /* To stop a block, clear the enable bit and poll till it
5540  * clears.  tp->lock is held.
5541  */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit of the block's mode register ... */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* ... then poll up to MAX_WAIT_CNT * 100us = 100ms for the
	 * hardware to acknowledge by clearing the bit itself.
	 */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	/* By design a silent caller absorbs the timeout (returns 0);
	 * only a non-silent timeout is logged and reported as -ENODEV.
	 */
	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
5584
5585 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip
	 * while the individual blocks are being shut down.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the RX-side blocks.  Errors are OR-accumulated so every
	 * block is attempted even if an earlier one times out.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the TX-side blocks and the DMA engines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and poll (up to 100ms) for the
	 * enable bit to clear; MAC_TX_MODE has no tg3_stop_block()
	 * style handshake, hence the open-coded loop.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	/* Buffer manager and memory arbiter go down last. */
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the shared status and statistics blocks if present. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5647
5648 /* tp->lock is held. */
5649 static int tg3_nvram_lock(struct tg3 *tp)
5650 {
5651         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5652                 int i;
5653
5654                 if (tp->nvram_lock_cnt == 0) {
5655                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5656                         for (i = 0; i < 8000; i++) {
5657                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5658                                         break;
5659                                 udelay(20);
5660                         }
5661                         if (i == 8000) {
5662                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5663                                 return -ENODEV;
5664                         }
5665                 }
5666                 tp->nvram_lock_cnt++;
5667         }
5668         return 0;
5669 }
5670
5671 /* tp->lock is held. */
5672 static void tg3_nvram_unlock(struct tg3 *tp)
5673 {
5674         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5675                 if (tp->nvram_lock_cnt > 0)
5676                         tp->nvram_lock_cnt--;
5677                 if (tp->nvram_lock_cnt == 0)
5678                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5679         }
5680 }
5681
5682 /* tp->lock is held. */
5683 static void tg3_enable_nvram_access(struct tg3 *tp)
5684 {
5685         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5686             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5687                 u32 nvaccess = tr32(NVRAM_ACCESS);
5688
5689                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5690         }
5691 }
5692
5693 /* tp->lock is held. */
5694 static void tg3_disable_nvram_access(struct tg3 *tp)
5695 {
5696         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5697             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5698                 u32 nvaccess = tr32(NVRAM_ACCESS);
5699
5700                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5701         }
5702 }
5703
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless the APE shared-memory segment signature is
	 * valid and the APE firmware reports itself ready.
	 */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Post our event only when no earlier event is still
		 * pending; the write is done under the MEM lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if the event was actually posted
	 * (i.e. the pending bit was observed clear above).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5739
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	/* Nothing to report when no APE management firmware is present. */
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (kind) {
		case RESET_KIND_INIT:
			/* Publish the host segment signature and length,
			 * bump the init counter, and identify this driver
			 * and its behavior flags to the APE.
			 */
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
					APE_HOST_SEG_SIG_MAGIC);
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
					APE_HOST_SEG_LEN_MAGIC);
			apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
			tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
			tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
					APE_HOST_DRIVER_ID_MAGIC);
			tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
					APE_HOST_BEHAV_NO_PHYLOCK);

			event = APE_EVENT_STATUS_STATE_START;
			break;
		case RESET_KIND_SHUTDOWN:
			/* With the interface we are currently using,
			 * APE does not track driver state.  Wiping
			 * out the HOST SEGMENT SIGNATURE forces
			 * the APE to assume OS absent status.
			 */
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

			event = APE_EVENT_STATUS_STATE_UNLOAD;
			break;
		case RESET_KIND_SUSPEND:
			event = APE_EVENT_STATUS_STATE_SUSPEND;
			break;
		default:
			/* Unknown reset kind: report nothing. */
			return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
5784
5785 /* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	/* Tell the bootcode a driver is attached before the reset. */
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	/* Devices using the newer ASF handshake also get the driver
	 * state written into the state mailbox.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	/* INIT and SUSPEND are reported to the APE before the reset;
	 * SHUTDOWN is reported afterwards in tg3_write_sig_post_reset().
	 */
	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
5817
5818 /* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	/* New-handshake devices get a "done" state after the reset
	 * completes; note there is no SUSPEND_DONE state to write.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	/* SHUTDOWN is the one kind reported to the APE post-reset
	 * (INIT/SUSPEND were handled in tg3_write_sig_pre_reset()).
	 */
	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
5841
5842 /* tp->lock is held. */
5843 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5844 {
5845         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5846                 switch (kind) {
5847                 case RESET_KIND_INIT:
5848                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5849                                       DRV_STATE_START);
5850                         break;
5851
5852                 case RESET_KIND_SHUTDOWN:
5853                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5854                                       DRV_STATE_UNLOAD);
5855                         break;
5856
5857                 case RESET_KIND_SUSPEND:
5858                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5859                                       DRV_STATE_SUSPEND);
5860                         break;
5861
5862                 default:
5863                         break;
5864                 }
5865         }
5866 }
5867
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 signals readiness via VCPU_STATUS instead of the
		 * firmware mailbox.  Wait up to 200 * 100us = 20ms for
		 * init done.
		 */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: bootcode writes
	 * back the one's complement of the magic value when done
	 * (up to 100000 * 10us = 1 second).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5906
5907 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only PCI_COMMAND needs an explicit save here; the other
	 * config registers rewritten by tg3_restore_pci_state() come
	 * from values already cached in *tp (misc_host_ctrl,
	 * pci_cacheline_sz, pci_lat_timer, ...).
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5912
5913 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
			pcie_set_readrq(tp->pdev, 4096);
		else {
			/* Conventional PCI: restore the probe-time cache
			 * line size and latency timer values.
			 */
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI in the chip's own
			 * message-interrupt mode register.
			 */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5976
static void tg3_stop_fw(struct tg3 *);	/* defined below; needed by tg3_chip_reset() */
5978
5979 /* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe workaround registers/bits — confirm against the
		 * Broadcom errata before changing.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver-initiated reset to the VCPU and
		 * make sure the VCPU is not halted.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 / bit 15 is an
			 * undocumented 5750 A0 workaround — verify before
			 * touching.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Set PCIE max payload size to 128 bytes and
		 * clear the "no snoop" and "relaxed ordering" bits.
		 */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
				      0);

		pcie_set_readrq(tp->pdev, 4096);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter; 5780-class parts preserve
	 * their other MEMARB_MODE bits across this write.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): register 0xc4 / bit 15 here is an
		 * undocumented 5705 A0 workaround.
		 */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Keep only the APE TX/RX enables; re-enable the MAC
		 * transmitter if APE TX was on.
		 */
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	/* Wait for bootcode/VCPU to finish before touching ASF state. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 / bit 25 is another undocumented
		 * PCIe workaround register.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
6193
6194 /* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	/* Only ASF firmware that is not managed via the APE needs this
	 * pause handshake; otherwise the function is a no-op.
	 */
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		/* Ask the firmware to pause itself, then ring the event. */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
6210
6211 /* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	/* Quiesce firmware first, then advertise the impending reset. */
	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	/* Bring the hardware blocks down (best effort) and reset. */
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Tell the firmware which reset just completed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
6231
/* Layout of the firmware image loaded into on-chip memory.  The
 * sections are contiguous: TEXT at 0x08000000 (0x9c0 bytes) is
 * followed directly by RODATA at 0x080009c0, then DATA/SBSS/BSS.
 * NOTE(review): "RELASE" below is a long-standing spelling typo;
 * renaming it would require updating every user elsewhere in this
 * file — TODO: confirm all users before fixing.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
6246
/* .text section of the 5701 A0 fixup firmware: raw MIPS instruction
 * words, copied verbatim into the RX CPU scratch area by
 * tg3_load_5701_a0_firmware_fix().  Do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6340
/* .rodata section of the 5701 A0 fixup firmware.  The words are packed
 * ASCII (presumably the firmware's diagnostic message strings — the
 * exact decoding is not used by the driver).  Do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};

/* .data section would go here, but it is all zeros; the loader
 * zero-fills it instead (data_data == NULL in fw_info).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
6355
/* On-chip scratch RAM windows used when loading firmware into the
 * RX and TX CPUs (16KB each).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
6360
6361 /* tp->lock is held. */
6362 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6363 {
6364         int i;
6365
6366         BUG_ON(offset == TX_CPU_BASE &&
6367             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6368
6369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6370                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6371
6372                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6373                 return 0;
6374         }
6375         if (offset == RX_CPU_BASE) {
6376                 for (i = 0; i < 10000; i++) {
6377                         tw32(offset + CPU_STATE, 0xffffffff);
6378                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6379                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6380                                 break;
6381                 }
6382
6383                 tw32(offset + CPU_STATE, 0xffffffff);
6384                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6385                 udelay(10);
6386         } else {
6387                 for (i = 0; i < 10000; i++) {
6388                         tw32(offset + CPU_STATE, 0xffffffff);
6389                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6390                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6391                                 break;
6392                 }
6393         }
6394
6395         if (i >= 10000) {
6396                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6397                        "and %s CPU\n",
6398                        tp->dev->name,
6399                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6400                 return -ENODEV;
6401         }
6402
6403         /* Clear firmware's nvram arbitration. */
6404         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6405                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6406         return 0;
6407 }
6408
/* Describes one firmware image to be copied into a CPU scratch area.
 * Only the low 16 bits of each *_base address are used as the offset
 * within the scratch window; a NULL *_data pointer means the section
 * is zero-filled instead of copied.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
6420
6421 /* tp->lock is held. */
6422 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6423                                  int cpu_scratch_size, struct fw_info *info)
6424 {
6425         int err, lock_err, i;
6426         void (*write_op)(struct tg3 *, u32, u32);
6427
6428         if (cpu_base == TX_CPU_BASE &&
6429             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6430                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6431                        "TX cpu firmware on %s which is 5705.\n",
6432                        tp->dev->name);
6433                 return -EINVAL;
6434         }
6435
6436         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6437                 write_op = tg3_write_mem;
6438         else
6439                 write_op = tg3_write_indirect_reg32;
6440
6441         /* It is possible that bootcode is still loading at this point.
6442          * Get the nvram lock first before halting the cpu.
6443          */
6444         lock_err = tg3_nvram_lock(tp);
6445         err = tg3_halt_cpu(tp, cpu_base);
6446         if (!lock_err)
6447                 tg3_nvram_unlock(tp);
6448         if (err)
6449                 goto out;
6450
6451         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6452                 write_op(tp, cpu_scratch_base + i, 0);
6453         tw32(cpu_base + CPU_STATE, 0xffffffff);
6454         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6455         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6456                 write_op(tp, (cpu_scratch_base +
6457                               (info->text_base & 0xffff) +
6458                               (i * sizeof(u32))),
6459                          (info->text_data ?
6460                           info->text_data[i] : 0));
6461         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6462                 write_op(tp, (cpu_scratch_base +
6463                               (info->rodata_base & 0xffff) +
6464                               (i * sizeof(u32))),
6465                          (info->rodata_data ?
6466                           info->rodata_data[i] : 0));
6467         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6468                 write_op(tp, (cpu_scratch_base +
6469                               (info->data_base & 0xffff) +
6470                               (i * sizeof(u32))),
6471                          (info->data_data ?
6472                           info->data_data[i] : 0));
6473
6474         err = 0;
6475
6476 out:
6477         return err;
6478 }
6479
6480 /* tp->lock is held. */
6481 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6482 {
6483         struct fw_info info;
6484         int err, i;
6485
6486         info.text_base = TG3_FW_TEXT_ADDR;
6487         info.text_len = TG3_FW_TEXT_LEN;
6488         info.text_data = &tg3FwText[0];
6489         info.rodata_base = TG3_FW_RODATA_ADDR;
6490         info.rodata_len = TG3_FW_RODATA_LEN;
6491         info.rodata_data = &tg3FwRodata[0];
6492         info.data_base = TG3_FW_DATA_ADDR;
6493         info.data_len = TG3_FW_DATA_LEN;
6494         info.data_data = NULL;
6495
6496         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6497                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6498                                     &info);
6499         if (err)
6500                 return err;
6501
6502         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6503                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6504                                     &info);
6505         if (err)
6506                 return err;
6507
6508         /* Now startup only the RX cpu. */
6509         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6510         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6511
6512         for (i = 0; i < 5; i++) {
6513                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6514                         break;
6515                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6516                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6517                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6518                 udelay(1000);
6519         }
6520         if (i >= 5) {
6521                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6522                        "to set RX CPU PC, is %08x should be %08x\n",
6523                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6524                        TG3_FW_TEXT_ADDR);
6525                 return -ENODEV;
6526         }
6527         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6528         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6529
6530         return 0;
6531 }
6532
6533
/* Layout of the TSO firmware image (MIPS).  Version 1.6.0.
 * NOTE(review): "RELASE" below is a historical typo; the macro name is
 * kept as-is since renaming it could break references elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
6548
6549 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6550         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6551         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6552         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6553         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6554         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6555         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6556         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6557         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6558         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6559         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6560         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6561         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6562         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6563         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6564         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6565         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6566         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6567         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6568         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6569         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6570         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6571         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6572         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6573         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6574         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6575         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6576         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6577         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6578         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6579         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6580         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6581         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6582         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6583         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6584         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6585         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6586         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6587         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6588         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6589         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6590         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6591         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6592         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6593         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6594         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6595         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6596         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6597         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6598         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6599         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6600         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6601         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6602         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6603         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6604         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6605         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6606         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6607         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6608         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6609         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6610         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6611         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6612         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6613         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6614         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6615         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6616         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6617         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6618         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6619         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6620         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6621         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6622         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6623         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6624         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6625         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6626         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6627         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6628         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6629         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6630         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6631         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6632         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6633         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6634         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6635         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6636         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6637         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6638         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6639         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6640         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6641         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6642         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6643         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6644         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6645         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6646         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6647         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6648         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6649         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6650         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6651         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6652         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6653         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6654         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6655         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6656         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6657         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6658         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6659         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6660         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6661         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6662         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6663         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6664         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6665         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6666         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6667         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6668         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6669         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6670         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6671         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6672         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6673         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6674         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6675         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6676         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6677         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6678         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6679         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6680         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6681         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6682         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6683         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6684         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6685         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6686         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6687         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6688         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6689         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6690         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6691         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6692         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6693         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6694         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6695         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6696         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6697         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6698         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6699         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6700         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6701         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6702         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6703         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6704         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6705         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6706         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6707         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6708         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6709         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6710         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6711         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6712         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6713         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6714         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6715         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6716         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6717         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6718         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6719         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6720         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6721         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6722         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6723         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6724         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6725         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6726         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6727         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6728         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6729         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6730         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6731         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6732         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6733         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6734         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6735         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6736         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6737         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6738         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6739         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6740         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6741         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6742         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6743         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6744         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6745         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6746         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6747         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6748         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6749         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6750         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6751         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6752         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6753         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6754         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6755         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6756         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6757         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6758         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6759         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6760         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6761         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6762         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6763         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6764         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6765         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6766         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6767         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6768         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6769         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6770         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6771         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6772         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6773         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6774         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6775         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6776         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6777         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6778         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6779         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6780         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6781         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6782         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6783         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6784         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6785         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6786         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6787         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6788         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6789         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6790         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6791         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6792         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6793         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6794         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6795         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6796         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6797         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6798         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6799         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6800         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6801         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6802         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6803         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6804         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6805         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6806         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6807         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6808         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6809         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6810         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6811         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6812         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6813         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6814         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6815         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6816         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6817         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6818         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6819         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6820         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6821         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6822         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6823         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6824         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6825         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6826         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6827         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6828         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6829         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6830         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6831         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6832         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6833         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6834 };
6835
/* Read-only data segment of the standard TSO firmware image.  The words
 * are packed big-endian ASCII diagnostic tags used by the firmware
 * (e.g. "MainCpuB", "stkoffldIn", "SwEvent0", "fatalErr").
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6843
/* Initialized data segment of the standard TSO firmware image.  Contains
 * the packed-ASCII version tag "stkoffld_v1.6.0".
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6849
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* NOTE(review): "RELASE" is a long-standing typo; left as-is because any
 * use sites outside this chunk would also need renaming.
 */
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* NIC SRAM load addresses and segment lengths (bytes) for the 5705 TSO
 * image; text executes at 0x00010000 and the segments are laid out
 * back-to-back (text, rodata, data, sbss, bss).
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6865
/* Text (code) segment of the 5705-specific TSO firmware: raw MIPS
 * instruction words, loaded into NIC SRAM at TG3_TSO5_FW_TEXT_ADDR by
 * tg3_load_tso_firmware().  Opaque data — do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
7024
/* Read-only data segment of the 5705 TSO firmware image: packed-ASCII
 * diagnostic tags ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
7031
/* Initialized data segment of the 5705 TSO firmware image.  Contains the
 * packed-ASCII version tag "stkoffld_v1.2.0".
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
7036
7037 /* tp->lock is held. */
7038 static int tg3_load_tso_firmware(struct tg3 *tp)
7039 {
7040         struct fw_info info;
7041         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7042         int err, i;
7043
7044         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7045                 return 0;
7046
7047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7048                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
7049                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
7050                 info.text_data = &tg3Tso5FwText[0];
7051                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
7052                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
7053                 info.rodata_data = &tg3Tso5FwRodata[0];
7054                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
7055                 info.data_len = TG3_TSO5_FW_DATA_LEN;
7056                 info.data_data = &tg3Tso5FwData[0];
7057                 cpu_base = RX_CPU_BASE;
7058                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7059                 cpu_scratch_size = (info.text_len +
7060                                     info.rodata_len +
7061                                     info.data_len +
7062                                     TG3_TSO5_FW_SBSS_LEN +
7063                                     TG3_TSO5_FW_BSS_LEN);
7064         } else {
7065                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
7066                 info.text_len = TG3_TSO_FW_TEXT_LEN;
7067                 info.text_data = &tg3TsoFwText[0];
7068                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
7069                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
7070                 info.rodata_data = &tg3TsoFwRodata[0];
7071                 info.data_base = TG3_TSO_FW_DATA_ADDR;
7072                 info.data_len = TG3_TSO_FW_DATA_LEN;
7073                 info.data_data = &tg3TsoFwData[0];
7074                 cpu_base = TX_CPU_BASE;
7075                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7076                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7077         }
7078
7079         err = tg3_load_firmware_cpu(tp, cpu_base,
7080                                     cpu_scratch_base, cpu_scratch_size,
7081                                     &info);
7082         if (err)
7083                 return err;
7084
7085         /* Now startup the cpu. */
7086         tw32(cpu_base + CPU_STATE, 0xffffffff);
7087         tw32_f(cpu_base + CPU_PC,    info.text_base);
7088
7089         for (i = 0; i < 5; i++) {
7090                 if (tr32(cpu_base + CPU_PC) == info.text_base)
7091                         break;
7092                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7093                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7094                 tw32_f(cpu_base + CPU_PC,    info.text_base);
7095                 udelay(1000);
7096         }
7097         if (i >= 5) {
7098                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7099                        "to set CPU PC, is %08x should be %08x\n",
7100                        tp->dev->name, tr32(cpu_base + CPU_PC),
7101                        info.text_base);
7102                 return -ENODEV;
7103         }
7104         tw32(cpu_base + CPU_STATE, 0xffffffff);
7105         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7106         return 0;
7107 }
7108
7109
7110 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7111 {
7112         struct tg3 *tp = netdev_priv(dev);
7113         struct sockaddr *addr = p;
7114         int err = 0, skip_mac_1 = 0;
7115
7116         if (!is_valid_ether_addr(addr->sa_data))
7117                 return -EINVAL;
7118
7119         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7120
7121         if (!netif_running(dev))
7122                 return 0;
7123
7124         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7125                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7126
7127                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7128                 addr0_low = tr32(MAC_ADDR_0_LOW);
7129                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7130                 addr1_low = tr32(MAC_ADDR_1_LOW);
7131
7132                 /* Skip MAC addr 1 if ASF is using it. */
7133                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7134                     !(addr1_high == 0 && addr1_low == 0))
7135                         skip_mac_1 = 1;
7136         }
7137         spin_lock_bh(&tp->lock);
7138         __tg3_set_mac_addr(tp, skip_mac_1);
7139         spin_unlock_bh(&tp->lock);
7140
7141         return err;
7142 }
7143
7144 /* tp->lock is held. */
7145 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7146                            dma_addr_t mapping, u32 maxlen_flags,
7147                            u32 nic_addr)
7148 {
7149         tg3_write_mem(tp,
7150                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7151                       ((u64) mapping >> 32));
7152         tg3_write_mem(tp,
7153                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7154                       ((u64) mapping & 0xffffffff));
7155         tg3_write_mem(tp,
7156                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7157                        maxlen_flags);
7158
7159         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7160                 tg3_write_mem(tp,
7161                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7162                               nic_addr);
7163 }
7164
7165 static void __tg3_set_rx_mode(struct net_device *);
7166 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7167 {
7168         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7169         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7170         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7171         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7172         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7173                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7174                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7175         }
7176         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7177         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7178         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7179                 u32 val = ec->stats_block_coalesce_usecs;
7180
7181                 if (!netif_carrier_ok(tp->dev))
7182                         val = 0;
7183
7184                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7185         }
7186 }
7187
7188 /* tp->lock is held. */
7189 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7190 {
7191         u32 val, rdmac_mode;
7192         int i, err, limit;
7193
7194         tg3_disable_ints(tp);
7195
7196         tg3_stop_fw(tp);
7197
7198         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7199
7200         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7201                 tg3_abort_hw(tp, 1);
7202         }
7203
7204         if (reset_phy &&
7205             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7206                 tg3_phy_reset(tp);
7207
7208         err = tg3_chip_reset(tp);
7209         if (err)
7210                 return err;
7211
7212         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7213
7214         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7215                 val = tr32(TG3_CPMU_CTRL);
7216                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7217                 tw32(TG3_CPMU_CTRL, val);
7218
7219                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7220                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7221                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7222                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7223
7224                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7225                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7226                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7227                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7228
7229                 val = tr32(TG3_CPMU_HST_ACC);
7230                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7231                 val |= CPMU_HST_ACC_MACCLK_6_25;
7232                 tw32(TG3_CPMU_HST_ACC, val);
7233         }
7234
7235         /* This works around an issue with Athlon chipsets on
7236          * B3 tigon3 silicon.  This bit has no effect on any
7237          * other revision.  But do not set this on PCI Express
7238          * chips and don't even touch the clocks if the CPMU is present.
7239          */
7240         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7241                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7242                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7243                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7244         }
7245
7246         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7247             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7248                 val = tr32(TG3PCI_PCISTATE);
7249                 val |= PCISTATE_RETRY_SAME_DMA;
7250                 tw32(TG3PCI_PCISTATE, val);
7251         }
7252
7253         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7254                 /* Allow reads and writes to the
7255                  * APE register and memory space.
7256                  */
7257                 val = tr32(TG3PCI_PCISTATE);
7258                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7259                        PCISTATE_ALLOW_APE_SHMEM_WR;
7260                 tw32(TG3PCI_PCISTATE, val);
7261         }
7262
7263         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7264                 /* Enable some hw fixes.  */
7265                 val = tr32(TG3PCI_MSI_DATA);
7266                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7267                 tw32(TG3PCI_MSI_DATA, val);
7268         }
7269
7270         /* Descriptor ring init may make accesses to the
7271          * NIC SRAM area to setup the TX descriptors, so we
7272          * can only do this after the hardware has been
7273          * successfully reset.
7274          */
7275         err = tg3_init_rings(tp);
7276         if (err)
7277                 return err;
7278
7279         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7280             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7281                 /* This value is determined during the probe time DMA
7282                  * engine test, tg3_test_dma.
7283                  */
7284                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7285         }
7286
7287         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7288                           GRC_MODE_4X_NIC_SEND_RINGS |
7289                           GRC_MODE_NO_TX_PHDR_CSUM |
7290                           GRC_MODE_NO_RX_PHDR_CSUM);
7291         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7292
7293         /* Pseudo-header checksum is done by hardware logic and not
7294          * the offload processers, so make the chip do the pseudo-
7295          * header checksums on receive.  For transmit it is more
7296          * convenient to do the pseudo-header checksum in software
7297          * as Linux does that on transmit for us in all cases.
7298          */
7299         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7300
7301         tw32(GRC_MODE,
7302              tp->grc_mode |
7303              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7304
7305         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7306         val = tr32(GRC_MISC_CFG);
7307         val &= ~0xff;
7308         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7309         tw32(GRC_MISC_CFG, val);
7310
7311         /* Initialize MBUF/DESC pool. */
7312         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7313                 /* Do nothing.  */
7314         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7315                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7316                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7317                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7318                 else
7319                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7320                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7321                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7322         }
7323         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7324                 int fw_len;
7325
7326                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7327                           TG3_TSO5_FW_RODATA_LEN +
7328                           TG3_TSO5_FW_DATA_LEN +
7329                           TG3_TSO5_FW_SBSS_LEN +
7330                           TG3_TSO5_FW_BSS_LEN);
7331                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7332                 tw32(BUFMGR_MB_POOL_ADDR,
7333                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7334                 tw32(BUFMGR_MB_POOL_SIZE,
7335                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7336         }
7337
7338         if (tp->dev->mtu <= ETH_DATA_LEN) {
7339                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7340                      tp->bufmgr_config.mbuf_read_dma_low_water);
7341                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7342                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7343                 tw32(BUFMGR_MB_HIGH_WATER,
7344                      tp->bufmgr_config.mbuf_high_water);
7345         } else {
7346                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7347                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7348                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7349                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7350                 tw32(BUFMGR_MB_HIGH_WATER,
7351                      tp->bufmgr_config.mbuf_high_water_jumbo);
7352         }
7353         tw32(BUFMGR_DMA_LOW_WATER,
7354              tp->bufmgr_config.dma_low_water);
7355         tw32(BUFMGR_DMA_HIGH_WATER,
7356              tp->bufmgr_config.dma_high_water);
7357
7358         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7359         for (i = 0; i < 2000; i++) {
7360                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7361                         break;
7362                 udelay(10);
7363         }
7364         if (i >= 2000) {
7365                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7366                        tp->dev->name);
7367                 return -ENODEV;
7368         }
7369
7370         /* Setup replenish threshold. */
7371         val = tp->rx_pending / 8;
7372         if (val == 0)
7373                 val = 1;
7374         else if (val > tp->rx_std_max_post)
7375                 val = tp->rx_std_max_post;
7376         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7377                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7378                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7379
7380                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7381                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7382         }
7383
7384         tw32(RCVBDI_STD_THRESH, val);
7385
7386         /* Initialize TG3_BDINFO's at:
7387          *  RCVDBDI_STD_BD:     standard eth size rx ring
7388          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7389          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7390          *
7391          * like so:
7392          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7393          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7394          *                              ring attribute flags
7395          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7396          *
7397          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7398          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7399          *
7400          * The size of each ring is fixed in the firmware, but the location is
7401          * configurable.
7402          */
7403         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7404              ((u64) tp->rx_std_mapping >> 32));
7405         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7406              ((u64) tp->rx_std_mapping & 0xffffffff));
7407         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7408              NIC_SRAM_RX_BUFFER_DESC);
7409
7410         /* Don't even try to program the JUMBO/MINI buffer descriptor
7411          * configs on 5705.
7412          */
7413         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7414                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7415                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7416         } else {
7417                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7418                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7419
7420                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7421                      BDINFO_FLAGS_DISABLED);
7422
7423                 /* Setup replenish threshold. */
7424                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7425
7426                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7427                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7428                              ((u64) tp->rx_jumbo_mapping >> 32));
7429                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7430                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7431                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7432                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7433                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7434                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7435                 } else {
7436                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7437                              BDINFO_FLAGS_DISABLED);
7438                 }
7439
7440         }
7441
7442         /* There is only one send ring on 5705/5750, no need to explicitly
7443          * disable the others.
7444          */
7445         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7446                 /* Clear out send RCB ring in SRAM. */
7447                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7448                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7449                                       BDINFO_FLAGS_DISABLED);
7450         }
7451
7452         tp->tx_prod = 0;
7453         tp->tx_cons = 0;
7454         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7455         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7456
7457         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7458                        tp->tx_desc_mapping,
7459                        (TG3_TX_RING_SIZE <<
7460                         BDINFO_FLAGS_MAXLEN_SHIFT),
7461                        NIC_SRAM_TX_BUFFER_DESC);
7462
7463         /* There is only one receive return ring on 5705/5750, no need
7464          * to explicitly disable the others.
7465          */
7466         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7467                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7468                      i += TG3_BDINFO_SIZE) {
7469                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7470                                       BDINFO_FLAGS_DISABLED);
7471                 }
7472         }
7473
7474         tp->rx_rcb_ptr = 0;
7475         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7476
7477         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7478                        tp->rx_rcb_mapping,
7479                        (TG3_RX_RCB_RING_SIZE(tp) <<
7480                         BDINFO_FLAGS_MAXLEN_SHIFT),
7481                        0);
7482
7483         tp->rx_std_ptr = tp->rx_pending;
7484         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7485                      tp->rx_std_ptr);
7486
7487         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7488                                                 tp->rx_jumbo_pending : 0;
7489         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7490                      tp->rx_jumbo_ptr);
7491
7492         /* Initialize MAC address and backoff seed. */
7493         __tg3_set_mac_addr(tp, 0);
7494
7495         /* MTU + ethernet header + FCS + optional VLAN tag */
7496         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7497
7498         /* The slot time is changed by tg3_setup_phy if we
7499          * run at gigabit with half duplex.
7500          */
7501         tw32(MAC_TX_LENGTHS,
7502              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7503              (6 << TX_LENGTHS_IPG_SHIFT) |
7504              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7505
7506         /* Receive rules. */
7507         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7508         tw32(RCVLPC_CONFIG, 0x0181);
7509
7510         /* Calculate RDMAC_MODE setting early, we need it to determine
7511          * the RCVLPC_STATE_ENABLE mask.
7512          */
7513         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7514                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7515                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7516                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7517                       RDMAC_MODE_LNGREAD_ENAB);
7518
7519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7522                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7523                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7524                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7525
7526         /* If statement applies to 5705 and 5750 PCI devices only */
7527         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7528              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7529             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7530                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7531                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7532                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7533                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7534                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7535                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7536                 }
7537         }
7538
7539         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7540                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7541
7542         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7543                 rdmac_mode |= (1 << 27);
7544
7545         /* Receive/send statistics. */
7546         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7547                 val = tr32(RCVLPC_STATS_ENABLE);
7548                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7549                 tw32(RCVLPC_STATS_ENABLE, val);
7550         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7551                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7552                 val = tr32(RCVLPC_STATS_ENABLE);
7553                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7554                 tw32(RCVLPC_STATS_ENABLE, val);
7555         } else {
7556                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7557         }
7558         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7559         tw32(SNDDATAI_STATSENAB, 0xffffff);
7560         tw32(SNDDATAI_STATSCTRL,
7561              (SNDDATAI_SCTRL_ENABLE |
7562               SNDDATAI_SCTRL_FASTUPD));
7563
7564         /* Setup host coalescing engine. */
7565         tw32(HOSTCC_MODE, 0);
7566         for (i = 0; i < 2000; i++) {
7567                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7568                         break;
7569                 udelay(10);
7570         }
7571
7572         __tg3_set_coalesce(tp, &tp->coal);
7573
7574         /* set status block DMA address */
7575         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7576              ((u64) tp->status_mapping >> 32));
7577         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7578              ((u64) tp->status_mapping & 0xffffffff));
7579
7580         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7581                 /* Status/statistics block address.  See tg3_timer,
7582                  * the tg3_periodic_fetch_stats call there, and
7583                  * tg3_get_stats to see how this works for 5705/5750 chips.
7584                  */
7585                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7586                      ((u64) tp->stats_mapping >> 32));
7587                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7588                      ((u64) tp->stats_mapping & 0xffffffff));
7589                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7590                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7591         }
7592
7593         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7594
7595         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7596         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7597         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7598                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7599
7600         /* Clear statistics/status block in chip, and status block in ram. */
7601         for (i = NIC_SRAM_STATS_BLK;
7602              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7603              i += sizeof(u32)) {
7604                 tg3_write_mem(tp, i, 0);
7605                 udelay(40);
7606         }
7607         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7608
7609         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7610                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7611                 /* reset to prevent losing 1st rx packet intermittently */
7612                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7613                 udelay(10);
7614         }
7615
7616         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7617                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7618         else
7619                 tp->mac_mode = 0;
7620         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7621                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7622         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7623             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7624             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7625                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7626         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7627         udelay(40);
7628
7629         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7630          * If TG3_FLG2_IS_NIC is zero, we should read the
7631          * register to preserve the GPIO settings for LOMs. The GPIOs,
7632          * whether used as inputs or outputs, are set by boot code after
7633          * reset.
7634          */
7635         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7636                 u32 gpio_mask;
7637
7638                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7639                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7640                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7641
7642                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7643                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7644                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7645
7646                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7647                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7648
7649                 tp->grc_local_ctrl &= ~gpio_mask;
7650                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7651
7652                 /* GPIO1 must be driven high for eeprom write protect */
7653                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7654                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7655                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7656         }
7657         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7658         udelay(100);
7659
7660         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7661         tp->last_tag = 0;
7662
7663         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7664                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7665                 udelay(40);
7666         }
7667
7668         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7669                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7670                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7671                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7672                WDMAC_MODE_LNGREAD_ENAB);
7673
7674         /* If statement applies to 5705 and 5750 PCI devices only */
7675         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7676              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7677             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7678                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7679                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7680                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7681                         /* nothing */
7682                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7683                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7684                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7685                         val |= WDMAC_MODE_RX_ACCEL;
7686                 }
7687         }
7688
7689         /* Enable host coalescing bug fix */
7690         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7691                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7692
7693         tw32_f(WDMAC_MODE, val);
7694         udelay(40);
7695
7696         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7697                 u16 pcix_cmd;
7698
7699                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7700                                      &pcix_cmd);
7701                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7702                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7703                         pcix_cmd |= PCI_X_CMD_READ_2K;
7704                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7705                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7706                         pcix_cmd |= PCI_X_CMD_READ_2K;
7707                 }
7708                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7709                                       pcix_cmd);
7710         }
7711
7712         tw32_f(RDMAC_MODE, rdmac_mode);
7713         udelay(40);
7714
7715         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7716         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7717                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7718
7719         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7720                 tw32(SNDDATAC_MODE,
7721                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7722         else
7723                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7724
7725         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7726         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7727         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7728         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7729         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7730                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7731         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7732         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7733
7734         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7735                 err = tg3_load_5701_a0_firmware_fix(tp);
7736                 if (err)
7737                         return err;
7738         }
7739
7740         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7741                 err = tg3_load_tso_firmware(tp);
7742                 if (err)
7743                         return err;
7744         }
7745
7746         tp->tx_mode = TX_MODE_ENABLE;
7747         tw32_f(MAC_TX_MODE, tp->tx_mode);
7748         udelay(100);
7749
7750         tp->rx_mode = RX_MODE_ENABLE;
7751         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7752                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7753
7754         tw32_f(MAC_RX_MODE, tp->rx_mode);
7755         udelay(10);
7756
7757         tw32(MAC_LED_CTRL, tp->led_ctrl);
7758
7759         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7760         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7761                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7762                 udelay(10);
7763         }
7764         tw32_f(MAC_RX_MODE, tp->rx_mode);
7765         udelay(10);
7766
7767         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7768                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7769                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7770                         /* Set drive transmission level to 1.2V  */
7771                         /* only if the signal pre-emphasis bit is not set  */
7772                         val = tr32(MAC_SERDES_CFG);
7773                         val &= 0xfffff000;
7774                         val |= 0x880;
7775                         tw32(MAC_SERDES_CFG, val);
7776                 }
7777                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7778                         tw32(MAC_SERDES_CFG, 0x616000);
7779         }
7780
7781         /* Prevent chip from dropping frames when flow control
7782          * is enabled.
7783          */
7784         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7785
7786         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7787             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7788                 /* Use hardware link auto-negotiation */
7789                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7790         }
7791
7792         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7793             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7794                 u32 tmp;
7795
7796                 tmp = tr32(SERDES_RX_CTRL);
7797                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7798                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7799                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7800                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7801         }
7802
7803         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7804                 if (tp->link_config.phy_is_low_power) {
7805                         tp->link_config.phy_is_low_power = 0;
7806                         tp->link_config.speed = tp->link_config.orig_speed;
7807                         tp->link_config.duplex = tp->link_config.orig_duplex;
7808                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7809                 }
7810
7811                 err = tg3_setup_phy(tp, 0);
7812                 if (err)
7813                         return err;
7814
7815                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7816                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7817                         u32 tmp;
7818
7819                         /* Clear CRC stats. */
7820                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7821                                 tg3_writephy(tp, MII_TG3_TEST1,
7822                                              tmp | MII_TG3_TEST1_CRC_EN);
7823                                 tg3_readphy(tp, 0x14, &tmp);
7824                         }
7825                 }
7826         }
7827
7828         __tg3_set_rx_mode(tp->dev);
7829
7830         /* Initialize receive rules. */
7831         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7832         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7833         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7834         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7835
7836         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7837             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7838                 limit = 8;
7839         else
7840                 limit = 16;
7841         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7842                 limit -= 4;
7843         switch (limit) {
7844         case 16:
7845                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7846         case 15:
7847                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7848         case 14:
7849                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7850         case 13:
7851                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7852         case 12:
7853                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7854         case 11:
7855                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7856         case 10:
7857                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7858         case 9:
7859                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7860         case 8:
7861                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7862         case 7:
7863                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7864         case 6:
7865                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7866         case 5:
7867                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7868         case 4:
7869                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7870         case 3:
7871                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7872         case 2:
7873         case 1:
7874
7875         default:
7876                 break;
7877         }
7878
7879         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7880                 /* Write our heartbeat update interval to APE. */
7881                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7882                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7883
7884         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7885
7886         return 0;
7887 }
7888
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Reset the PCI memory window base address to offset 0. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
7900
/* Accumulate the 32-bit hardware statistics register REG into the
 * 64-bit counter PSTAT (a high/low u32 pair): add into ->low and,
 * when the unsigned addition wraps (low < addend), carry into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7907
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement
 * statistics registers into the 64-bit counters in tp->hw_stats.
 * Skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7948
/* Driver watchdog timer.  Runs periodically (every tp->timer_offset
 * jiffies) to work around non-tagged-status IRQ races, poll link
 * state, fetch statistics once per second, and send the ASF firmware
 * heartbeat.  Re-arms itself at the end of each invocation.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An IRQ synchronization is in progress; just re-arm. */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine stopped unexpectedly: schedule a
		 * full chip reset from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll link state via the MAC status register. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			/* SERDES link needs explicit polling. */
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to reset the link
				 * state machine before renegotiating.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
8068
8069 static int tg3_request_irq(struct tg3 *tp)
8070 {
8071         irq_handler_t fn;
8072         unsigned long flags;
8073         struct net_device *dev = tp->dev;
8074
8075         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8076                 fn = tg3_msi;
8077                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8078                         fn = tg3_msi_1shot;
8079                 flags = IRQF_SAMPLE_RANDOM;
8080         } else {
8081                 fn = tg3_interrupt;
8082                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8083                         fn = tg3_interrupt_tagged;
8084                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8085         }
8086         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8087 }
8088
8089 static int tg3_test_interrupt(struct tg3 *tp)
8090 {
8091         struct net_device *dev = tp->dev;
8092         int err, i, intr_ok = 0;
8093
8094         if (!netif_running(dev))
8095                 return -ENODEV;
8096
8097         tg3_disable_ints(tp);
8098
8099         free_irq(tp->pdev->irq, dev);
8100
8101         err = request_irq(tp->pdev->irq, tg3_test_isr,
8102                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
8103         if (err)
8104                 return err;
8105
8106         tp->hw_status->status &= ~SD_STATUS_UPDATED;
8107         tg3_enable_ints(tp);
8108
8109         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8110                HOSTCC_MODE_NOW);
8111
8112         for (i = 0; i < 5; i++) {
8113                 u32 int_mbox, misc_host_ctrl;
8114
8115                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8116                                         TG3_64BIT_REG_LOW);
8117                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8118
8119                 if ((int_mbox != 0) ||
8120                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8121                         intr_ok = 1;
8122                         break;
8123                 }
8124
8125                 msleep(10);
8126         }
8127
8128         tg3_disable_ints(tp);
8129
8130         free_irq(tp->pdev->irq, dev);
8131
8132         err = tg3_request_irq(tp);
8133
8134         if (err)
8135                 return err;
8136
8137         if (intr_ok)
8138                 return 0;
8139
8140         return -EIO;
8141 }
8142
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  On MSI failure the chip is reset because
 * the MSI cycle may have terminated with Master Abort.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when running in INTx mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* Re-init failed: release the freshly requested INTx line. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
8203
/* net_device open handler: power up the chip, allocate DMA-consistent
 * rings, request the IRQ (MSI when supported), initialize the
 * hardware, run the MSI delivery test, and start the watchdog timer
 * and TX queue.  Each failure path unwinds everything acquired so far.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slower (1 Hz) watchdog; the
		 * racy non-tagged protocol is polled at 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* tg3_test_msi() may fall back to INTx, clearing
		 * TG3_FLG2_USING_MSI on success of the fallback.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
8339
#if 0
/* Debug-only dump of chip registers, SRAM control blocks, mailboxes,
 * and NIC-side descriptors via printk.  Compiled out; kept for
 * hardware bring-up and debugging (see the matching #if 0 call site
 * in tg3_close()).
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		/* NOTE(review): MMIO address kept in an unsigned long;
		 * readl() normally takes an __iomem pointer — verify
		 * before resurrecting this code.
		 */
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8567
8568 static struct net_device_stats *tg3_get_stats(struct net_device *);
8569 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8570
/* net_device stop handler: quiesce NAPI and the reset worker, stop
 * the TX queue and watchdog timer, halt the chip, release the IRQ and
 * MSI, snapshot statistics for across-close reporting, free DMA
 * memory, and power the device down.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot counters before hw_stats memory is freed, so the
	 * totals survive while the device is down.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8614
8615 static inline unsigned long get_stat64(tg3_stat64_t *val)
8616 {
8617         unsigned long ret;
8618
8619 #if (BITS_PER_LONG == 32)
8620         ret = val->low;
8621 #else
8622         ret = ((u64)val->high << 32) | ((u64)val->low);
8623 #endif
8624         return ret;
8625 }
8626
8627 static inline u64 get_estat64(tg3_stat64_t *val)
8628 {
8629        return ((u64)val->high << 32) | ((u64)val->low);
8630 }
8631
/* Return the cumulative receive CRC error count.  On 5700/5701
 * copper parts the MAC's FCS counter is unreliable, so the count is
 * read from the PHY instead and accumulated in tp->phy_crc_errors;
 * all other configurations use the hardware rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it.
			 * NOTE(review): 0x14 is presumably the PHY's
			 * CRC error counter register (no named
			 * constant) — confirm against PHY datasheet.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8657
/* Compute the cumulative ethtool statistic for 'member': the running
 * hardware counter plus the value saved at the last device close
 * (hw_stats, old_estats, and estats are locals of the caller).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_estat64(&hw_stats->member)
8661
8662 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8663 {
8664         struct tg3_ethtool_stats *estats = &tp->estats;
8665         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8666         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8667
8668         if (!hw_stats)
8669                 return old_estats;
8670
8671         ESTAT_ADD(rx_octets);
8672         ESTAT_ADD(rx_fragments);
8673         ESTAT_ADD(rx_ucast_packets);
8674         ESTAT_ADD(rx_mcast_packets);
8675         ESTAT_ADD(rx_bcast_packets);
8676         ESTAT_ADD(rx_fcs_errors);
8677         ESTAT_ADD(rx_align_errors);
8678         ESTAT_ADD(rx_xon_pause_rcvd);
8679         ESTAT_ADD(rx_xoff_pause_rcvd);
8680         ESTAT_ADD(rx_mac_ctrl_rcvd);
8681         ESTAT_ADD(rx_xoff_entered);
8682         ESTAT_ADD(rx_frame_too_long_errors);
8683         ESTAT_ADD(rx_jabbers);
8684         ESTAT_ADD(rx_undersize_packets);
8685         ESTAT_ADD(rx_in_length_errors);
8686         ESTAT_ADD(rx_out_length_errors);
8687         ESTAT_ADD(rx_64_or_less_octet_packets);
8688         ESTAT_ADD(rx_65_to_127_octet_packets);
8689         ESTAT_ADD(rx_128_to_255_octet_packets);
8690         ESTAT_ADD(rx_256_to_511_octet_packets);
8691         ESTAT_ADD(rx_512_to_1023_octet_packets);
8692         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8693         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8694         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8695         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8696         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8697
8698         ESTAT_ADD(tx_octets);
8699         ESTAT_ADD(tx_collisions);
8700         ESTAT_ADD(tx_xon_sent);
8701         ESTAT_ADD(tx_xoff_sent);
8702         ESTAT_ADD(tx_flow_control);
8703         ESTAT_ADD(tx_mac_errors);
8704         ESTAT_ADD(tx_single_collisions);
8705         ESTAT_ADD(tx_mult_collisions);
8706         ESTAT_ADD(tx_deferred);
8707         ESTAT_ADD(tx_excessive_collisions);
8708         ESTAT_ADD(tx_late_collisions);
8709         ESTAT_ADD(tx_collide_2times);
8710         ESTAT_ADD(tx_collide_3times);
8711         ESTAT_ADD(tx_collide_4times);
8712         ESTAT_ADD(tx_collide_5times);
8713         ESTAT_ADD(tx_collide_6times);
8714         ESTAT_ADD(tx_collide_7times);
8715         ESTAT_ADD(tx_collide_8times);
8716         ESTAT_ADD(tx_collide_9times);
8717         ESTAT_ADD(tx_collide_10times);
8718         ESTAT_ADD(tx_collide_11times);
8719         ESTAT_ADD(tx_collide_12times);
8720         ESTAT_ADD(tx_collide_13times);
8721         ESTAT_ADD(tx_collide_14times);
8722         ESTAT_ADD(tx_collide_15times);
8723         ESTAT_ADD(tx_ucast_packets);
8724         ESTAT_ADD(tx_mcast_packets);
8725         ESTAT_ADD(tx_bcast_packets);
8726         ESTAT_ADD(tx_carrier_sense_errors);
8727         ESTAT_ADD(tx_discards);
8728         ESTAT_ADD(tx_errors);
8729
8730         ESTAT_ADD(dma_writeq_full);
8731         ESTAT_ADD(dma_write_prioq_full);
8732         ESTAT_ADD(rxbds_empty);
8733         ESTAT_ADD(rx_discards);
8734         ESTAT_ADD(rx_errors);
8735         ESTAT_ADD(rx_threshold_hit);
8736
8737         ESTAT_ADD(dma_readq_full);
8738         ESTAT_ADD(dma_read_prioq_full);
8739         ESTAT_ADD(tx_comp_queue_full);
8740
8741         ESTAT_ADD(ring_set_send_prod_index);
8742         ESTAT_ADD(ring_status_update);
8743         ESTAT_ADD(nic_irqs);
8744         ESTAT_ADD(nic_avoided_irqs);
8745         ESTAT_ADD(nic_tx_threshold_hit);
8746
8747         return estats;
8748 }
8749
/* netdev ->get_stats: fill tp->net_stats from the hardware statistics
 * block.  As with tg3_get_estats(), each field is the value saved
 * across the last chip reset (tp->net_stats_prev) plus the live
 * hardware counter; several netdev fields aggregate more than one
 * hardware counter.  Returns the saved snapshot unchanged if the
 * statistics block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8809
8810 static inline u32 calc_crc(unsigned char *buf, int len)
8811 {
8812         u32 reg;
8813         u32 tmp;
8814         int j, k;
8815
8816         reg = 0xffffffff;
8817
8818         for (j = 0; j < len; j++) {
8819                 reg ^= buf[j];
8820
8821                 for (k = 0; k < 8; k++) {
8822                         tmp = reg & 0x01;
8823
8824                         reg >>= 1;
8825
8826                         if (tmp) {
8827                                 reg ^= 0xedb88320;
8828                         }
8829                 }
8830         }
8831
8832         return ~reg;
8833 }
8834
8835 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8836 {
8837         /* accept or reject all multicast frames */
8838         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8839         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8840         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8841         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8842 }
8843
/* Program the MAC RX mode word and the multicast hash filter from
 * dev->flags and the device multicast list.  Caller must hold the
 * device lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s): each address hashes
		 * (via the complement of the CRC's low 7 bits) to one
		 * bit of the 128-bit hash table spread across the four
		 * MAC_HASH registers.
		 */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;	/* which hash register */
			bit &= 0x1f;			/* bit within register */
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when the word changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8907
/* netdev ->set_rx_mode entry point: take the full device lock and
 * apply the RX filtering configuration.  No-op while the interface
 * is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
8919
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len: the register dump always spans 32K. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8926
/* ethtool ->get_regs: dump chip registers into the 32K buffer at _p.
 * Each register is stored at its natural hardware offset within the
 * buffer; gaps (and everything when the PHY is powered down) are left
 * as zero from the initial memset.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Don't touch the chip while it is in low-power state. */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helpers: position p at the register's offset in the output buffer,
 * then copy one register (GET_REG32_1) or a len-byte range
 * (GET_REG32_LOOP) starting at base.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8999
/* ethtool ->get_eeprom_len: size of the device NVRAM in bytes. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
9006
9007 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
9008 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
9009 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
9010
/* ethtool ->get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.  NVRAM is read in aligned 4-byte words,
 * so a misaligned head and tail are handled by reading the enclosing
 * word and copying out only the requested bytes.  eeprom->len is
 * rebuilt to the number of bytes actually read, even on error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
9070
9071 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9072
/* ethtool ->set_eeprom: write eeprom->len bytes at eeprom->offset to
 * NVRAM.  Writes must cover whole aligned 4-byte words, so when the
 * request is misaligned at either end the neighbouring word is read
 * back first and merged with the user data in a temporary buffer
 * before the block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Userspace must echo the magic reported by get_eeprom. */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved head/tail words with user data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
9130
/* ethtool ->get_settings: report link capabilities and current
 * configuration.  When phylib manages the PHY the query is delegated
 * to phy_ethtool_gset(); otherwise the answer is built from the
 * driver's own link_config state.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes, unless this is a 10/100-only chip. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper supports 10/100 and TP; SerDes is fibre-only. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Active speed/duplex are only meaningful while up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
9171
/* ethtool ->set_settings: validate and apply a new link
 * configuration.  Delegates to phylib when it manages the PHY.
 * SerDes ports may only advertise 1000BaseT/fibre and may only force
 * 1000 Mb/s; copper may not force 1000 Mb/s, and 10/100-only chips
 * reject it entirely.  The accepted settings become the "orig" values
 * restored after resets, and the PHY is reprogrammed if the device
 * is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg: speed/duplex come from negotiation. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		/* Forced mode: no advertisement. */
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember these as the settings to restore after a reset. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9227
9228 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9229 {
9230         struct tg3 *tp = netdev_priv(dev);
9231
9232         strcpy(info->driver, DRV_MODULE_NAME);
9233         strcpy(info->version, DRV_MODULE_VERSION);
9234         strcpy(info->fw_version, tp->fw_ver);
9235         strcpy(info->bus_info, pci_name(tp->pdev));
9236 }
9237
9238 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9239 {
9240         struct tg3 *tp = netdev_priv(dev);
9241
9242         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9243             device_can_wakeup(&tp->pdev->dev))
9244                 wol->supported = WAKE_MAGIC;
9245         else
9246                 wol->supported = 0;
9247         wol->wolopts = 0;
9248         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9249             device_can_wakeup(&tp->pdev->dev))
9250                 wol->wolopts = WAKE_MAGIC;
9251         memset(&wol->sopass, 0, sizeof(wol->sopass));
9252 }
9253
/* ethtool ->set_wol: enable or disable magic-packet wake-up.  Only
 * WAKE_MAGIC is accepted, and only when the chip advertises WoL
 * capability and the PCI device can wake the system.  Updates both
 * the driver flag and the device wakeup state under tp->lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC) {
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		device_set_wakeup_enable(dp, true);
	} else {
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
		device_set_wakeup_enable(dp, false);
	}
	spin_unlock_bh(&tp->lock);

	return 0;
}
9277
9278 static u32 tg3_get_msglevel(struct net_device *dev)
9279 {
9280         struct tg3 *tp = netdev_priv(dev);
9281         return tp->msg_enable;
9282 }
9283
9284 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9285 {
9286         struct tg3 *tp = netdev_priv(dev);
9287         tp->msg_enable = value;
9288 }
9289
/* ethtool ->set_tso: toggle TCP segmentation offload.  Chips without
 * TSO capability reject enabling it.  HW_TSO_2-capable chips (other
 * than the 5906) toggle TSO6 along with TSO, and the chip revisions
 * listed below additionally toggle TSO_ECN.  The base TSO feature bit
 * is handled by the generic ethtool helper.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9314
/* ethtool ->nway_reset: restart link autonegotiation.  Fails with
 * -EAGAIN when the interface is down (or phylib owns the PHY but is
 * not connected), and with -EINVAL on SerDes ports or when autoneg is
 * not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice back-to-back and the
		 * first result discarded — presumably a dummy read to
		 * refresh the register; confirm it is intentional.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9348
9349 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9350 {
9351         struct tg3 *tp = netdev_priv(dev);
9352
9353         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9354         ering->rx_mini_max_pending = 0;
9355         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9356                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9357         else
9358                 ering->rx_jumbo_max_pending = 0;
9359
9360         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9361
9362         ering->rx_pending = tp->rx_pending;
9363         ering->rx_mini_pending = 0;
9364         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9365                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9366         else
9367                 ering->rx_jumbo_pending = 0;
9368
9369         ering->tx_pending = tp->tx_pending;
9370 }
9371
/* ethtool ->set_ringparam: resize the RX, RX-jumbo and TX rings.
 * Rejects sizes beyond the hardware ring sizes and TX rings that
 * cannot hold a maximally-fragmented skb (with a 3x margin on
 * TSO-bug chips).  A running device is halted and restarted with the
 * new sizes; on successful restart the PHY is restarted too.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9415
9416 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9417 {
9418         struct tg3 *tp = netdev_priv(dev);
9419
9420         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9421
9422         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9423                 epause->rx_pause = 1;
9424         else
9425                 epause->rx_pause = 0;
9426
9427         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9428                 epause->tx_pause = 1;
9429         else
9430                 epause->tx_pause = 0;
9431 }
9432
/* ethtool ->set_pauseparam: configure RX/TX flow control.
 *
 * With phylib managing the PHY: when pause autoneg is requested, the
 * requested rx/tx combination is translated into the standard
 * Pause/Asym_Pause advertisement bits and autonegotiation restarted
 * if they changed; with autoneg off, the flow-control mode is forced
 * immediately.
 *
 * In the legacy path the device is quiesced, the flags updated, and
 * a running device is halted and restarted to apply them.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto the
			 * IEEE Pause / Asym_Pause advertisement bits.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only renegotiate when the pause bits
				 * actually change.
				 */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			/* Autoneg off: force the flow-control mode now. */
			if (epause->rx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9525
9526 static u32 tg3_get_rx_csum(struct net_device *dev)
9527 {
9528         struct tg3 *tp = netdev_priv(dev);
9529         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9530 }
9531
9532 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9533 {
9534         struct tg3 *tp = netdev_priv(dev);
9535
9536         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9537                 if (data != 0)
9538                         return -EINVAL;
9539                 return 0;
9540         }
9541
9542         spin_lock_bh(&tp->lock);
9543         if (data)
9544                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9545         else
9546                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9547         spin_unlock_bh(&tp->lock);
9548
9549         return 0;
9550 }
9551
9552 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9553 {
9554         struct tg3 *tp = netdev_priv(dev);
9555
9556         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9557                 if (data != 0)
9558                         return -EINVAL;
9559                 return 0;
9560         }
9561
9562         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9563                 ethtool_op_set_tx_ipv6_csum(dev, data);
9564         else
9565                 ethtool_op_set_tx_csum(dev, data);
9566
9567         return 0;
9568 }
9569
9570 static int tg3_get_sset_count (struct net_device *dev, int sset)
9571 {
9572         switch (sset) {
9573         case ETH_SS_TEST:
9574                 return TG3_NUM_TEST;
9575         case ETH_SS_STATS:
9576                 return TG3_NUM_STATS;
9577         default:
9578                 return -EOPNOTSUPP;
9579         }
9580 }
9581
9582 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9583 {
9584         switch (stringset) {
9585         case ETH_SS_STATS:
9586                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9587                 break;
9588         case ETH_SS_TEST:
9589                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9590                 break;
9591         default:
9592                 WARN_ON(1);     /* we need a WARN() */
9593                 break;
9594         }
9595 }
9596
9597 static int tg3_phys_id(struct net_device *dev, u32 data)
9598 {
9599         struct tg3 *tp = netdev_priv(dev);
9600         int i;
9601
9602         if (!netif_running(tp->dev))
9603                 return -EAGAIN;
9604
9605         if (data == 0)
9606                 data = UINT_MAX / 2;
9607
9608         for (i = 0; i < (data * 2); i++) {
9609                 if ((i % 2) == 0)
9610                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9611                                            LED_CTRL_1000MBPS_ON |
9612                                            LED_CTRL_100MBPS_ON |
9613                                            LED_CTRL_10MBPS_ON |
9614                                            LED_CTRL_TRAFFIC_OVERRIDE |
9615                                            LED_CTRL_TRAFFIC_BLINK |
9616                                            LED_CTRL_TRAFFIC_LED);
9617
9618                 else
9619                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9620                                            LED_CTRL_TRAFFIC_OVERRIDE);
9621
9622                 if (msleep_interruptible(500))
9623                         break;
9624         }
9625         tw32(MAC_LED_CTRL, tp->led_ctrl);
9626         return 0;
9627 }
9628
/* ethtool .get_ethtool_stats hook: copy the driver's accumulated
 * statistics block into the caller-supplied u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9635
9636 #define NVRAM_TEST_SIZE 0x100
9637 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9638 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9639 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9640 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9641 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9642
/* NVRAM integrity self-test.
 *
 * Determines the image type from the magic word at offset 0 (legacy
 * EEPROM, firmware selfboot, or hardware selfboot), reads the relevant
 * number of bytes into a temporary buffer, then verifies the format's
 * checksum or parity scheme.
 *
 * Returns 0 on success (or for unrecognized-but-harmless selfboot
 * revisions, which are skipped rather than failed), -EIO on a read
 * failure or checksum/parity mismatch, -ENOMEM if the temporary
 * buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Choose how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: size depends on the revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: skip the test rather
				 * than report a failure.
				 */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	/* Early loop exit means a read failed; err already holds it. */
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image byte-sums to zero (mod 256). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ...and byte 17 carries another 8. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Each data byte plus its parity bit must have odd
			 * combined weight; either mismatch fails the test.
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9776
9777 #define TG3_SERDES_TIMEOUT_SEC  2
9778 #define TG3_COPPER_TIMEOUT_SEC  6
9779
9780 static int tg3_test_link(struct tg3 *tp)
9781 {
9782         int i, max;
9783
9784         if (!netif_running(tp->dev))
9785                 return -ENODEV;
9786
9787         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9788                 max = TG3_SERDES_TIMEOUT_SEC;
9789         else
9790                 max = TG3_COPPER_TIMEOUT_SEC;
9791
9792         for (i = 0; i < max; i++) {
9793                 if (netif_carrier_ok(tp->dev))
9794                         return 0;
9795
9796                 if (msleep_interruptible(1000))
9797                         break;
9798         }
9799
9800         return -EIO;
9801 }
9802
/* Register read/write self-test.  Only test the commonly used registers.
 *
 * For each applicable table entry: save the register, write all-zeros
 * and then all-ones patterns, and verify that read-only bits are
 * unchanged and read/write bits take the written value.  The saved
 * value is restored before moving on (and on failure, before exit).
 *
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Applicability flags: which chip families an entry is tested on. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits expected to be read-only */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: end of table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
10023
10024 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10025 {
10026         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10027         int i;
10028         u32 j;
10029
10030         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10031                 for (j = 0; j < len; j += 4) {
10032                         u32 val;
10033
10034                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10035                         tg3_read_mem(tp, offset + j, &val);
10036                         if (val != test_pattern[i])
10037                                 return -EIO;
10038                 }
10039         }
10040         return 0;
10041 }
10042
10043 static int tg3_test_memory(struct tg3 *tp)
10044 {
10045         static struct mem_entry {
10046                 u32 offset;
10047                 u32 len;
10048         } mem_tbl_570x[] = {
10049                 { 0x00000000, 0x00b50},
10050                 { 0x00002000, 0x1c000},
10051                 { 0xffffffff, 0x00000}
10052         }, mem_tbl_5705[] = {
10053                 { 0x00000100, 0x0000c},
10054                 { 0x00000200, 0x00008},
10055                 { 0x00004000, 0x00800},
10056                 { 0x00006000, 0x01000},
10057                 { 0x00008000, 0x02000},
10058                 { 0x00010000, 0x0e000},
10059                 { 0xffffffff, 0x00000}
10060         }, mem_tbl_5755[] = {
10061                 { 0x00000200, 0x00008},
10062                 { 0x00004000, 0x00800},
10063                 { 0x00006000, 0x00800},
10064                 { 0x00008000, 0x02000},
10065                 { 0x00010000, 0x0c000},
10066                 { 0xffffffff, 0x00000}
10067         }, mem_tbl_5906[] = {
10068                 { 0x00000200, 0x00008},
10069                 { 0x00004000, 0x00400},
10070                 { 0x00006000, 0x00400},
10071                 { 0x00008000, 0x01000},
10072                 { 0x00010000, 0x01000},
10073                 { 0xffffffff, 0x00000}
10074         };
10075         struct mem_entry *mem_tbl;
10076         int err = 0;
10077         int i;
10078
10079         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10080                 mem_tbl = mem_tbl_5755;
10081         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10082                 mem_tbl = mem_tbl_5906;
10083         else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10084                 mem_tbl = mem_tbl_5705;
10085         else
10086                 mem_tbl = mem_tbl_570x;
10087
10088         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10089                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10090                     mem_tbl[i].len)) != 0)
10091                         break;
10092         }
10093
10094         return err;
10095 }
10096
10097 #define TG3_MAC_LOOPBACK        0
10098 #define TG3_PHY_LOOPBACK        1
10099
/* Run a single-packet loopback test in the requested mode
 * (TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK).
 *
 * Configures the MAC (and, for PHY loopback, the PHY) for loopback,
 * builds one 1514-byte frame carrying an incrementing byte pattern,
 * queues it on the send ring, and polls for it to reappear on the
 * receive return ring, verifying descriptor status, length, and
 * payload.
 *
 * Returns 0 on success, -EIO on timeout or data mismatch, -ENOMEM if
 * the test skb cannot be allocated, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* NOTE(review): bit 0x20 of shadow register
				 * 0x1b is cleared while EPHY shadow mode is
				 * enabled; the bit's meaning is not visible
				 * in this file -- presumably a 5906 PHY
				 * workaround.  Confirm against the PHY spec.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY itself into loopback mode. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, zero padding, then an
	 * incrementing byte pattern used for payload verification below.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the hardware by advancing the send ring producer index. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been both fully transmitted and received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length includes the 4-byte FCS; strip it for compare. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
10267
/* Failure bits returned by tg3_test_loopback() and reported in
 * data[4] of tg3_self_test().
 */
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
					 TG3_PHY_LOOPBACK_FAILED)
10272
10273 static int tg3_test_loopback(struct tg3 *tp)
10274 {
10275         int err = 0;
10276         u32 cpmuctrl = 0;
10277
10278         if (!netif_running(tp->dev))
10279                 return TG3_LOOPBACK_FAILED;
10280
10281         err = tg3_reset_hw(tp, 1);
10282         if (err)
10283                 return TG3_LOOPBACK_FAILED;
10284
10285         /* Turn off gphy autopowerdown. */
10286         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10287                 tg3_phy_toggle_apd(tp, false);
10288
10289         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10290                 int i;
10291                 u32 status;
10292
10293                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10294
10295                 /* Wait for up to 40 microseconds to acquire lock. */
10296                 for (i = 0; i < 4; i++) {
10297                         status = tr32(TG3_CPMU_MUTEX_GNT);
10298                         if (status == CPMU_MUTEX_GNT_DRIVER)
10299                                 break;
10300                         udelay(10);
10301                 }
10302
10303                 if (status != CPMU_MUTEX_GNT_DRIVER)
10304                         return TG3_LOOPBACK_FAILED;
10305
10306                 /* Turn off link-based power management. */
10307                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10308                 tw32(TG3_CPMU_CTRL,
10309                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10310                                   CPMU_CTRL_LINK_AWARE_MODE));
10311         }
10312
10313         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10314                 err |= TG3_MAC_LOOPBACK_FAILED;
10315
10316         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10317                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10318
10319                 /* Release the mutex */
10320                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10321         }
10322
10323         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10324             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10325                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10326                         err |= TG3_PHY_LOOPBACK_FAILED;
10327         }
10328
10329         /* Re-enable gphy autopowerdown. */
10330         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10331                 tg3_phy_toggle_apd(tp, true);
10332
10333         return err;
10334 }
10335
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Results are written to data[]: [0] NVRAM, [1] link, [2] registers,
 * [3] memory, [4] loopback failure bitmask, [5] interrupt.  A nonzero
 * slot means that sub-test failed; ETH_TEST_FL_FAILED is also set in
 * etest->flags.  The offline tests halt the chip and restart it
 * afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* The tests need the chip awake; wake it if it was powered down. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before taking the chip offline. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs so registers and
		 * internal memory can be probed safely.  The NVRAM lock
		 * is taken around the CPU halts; it is only released if
		 * it was successfully acquired.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test runs with the locks dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back to its pre-test running state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if the restart above succeeded. */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Return to the low-power state we started in. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10413
/* net_device ioctl handler for MII register access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG).  When phylib manages the PHY the request
 * is delegated to it; otherwise the PHY is accessed directly under
 * tp->lock.  Returns -EOPNOTSUPP for unrecognized commands and for
 * SERDES devices, which have no MII PHY.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib owns the PHY; forward only once it is attached. */
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
	}

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* PHY access is not reliable while powered down. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writes can reconfigure the PHY; require admin rights. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
10471
#if TG3_VLAN_TAG_USED
/* net_device vlan_rx_register hook: record the new VLAN group and
 * reprogram the chip's VLAN tag handling.  The interface is stopped
 * and the full lock held around the change so no packet is processed
 * against a half-updated configuration.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
10493
10494 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10495 {
10496         struct tg3 *tp = netdev_priv(dev);
10497
10498         memcpy(ec, &tp->coal, sizeof(*ec));
10499         return 0;
10500 }
10501
10502 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10503 {
10504         struct tg3 *tp = netdev_priv(dev);
10505         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10506         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10507
10508         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10509                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10510                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10511                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10512                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10513         }
10514
10515         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10516             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10517             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10518             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10519             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10520             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10521             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10522             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10523             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10524             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10525                 return -EINVAL;
10526
10527         /* No rx interrupts will be generated if both are zero */
10528         if ((ec->rx_coalesce_usecs == 0) &&
10529             (ec->rx_max_coalesced_frames == 0))
10530                 return -EINVAL;
10531
10532         /* No tx interrupts will be generated if both are zero */
10533         if ((ec->tx_coalesce_usecs == 0) &&
10534             (ec->tx_max_coalesced_frames == 0))
10535                 return -EINVAL;
10536
10537         /* Only copy relevant parameters, ignore all others. */
10538         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10539         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10540         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10541         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10542         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10543         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10544         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10545         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10546         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10547
10548         if (netif_running(dev)) {
10549                 tg3_full_lock(tp, 0);
10550                 __tg3_set_coalesce(tp, &tp->coal);
10551                 tg3_full_unlock(tp);
10552         }
10553         return 0;
10554 }
10555
/* ethtool operations table registered for every tg3 net_device. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10588
10589 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10590 {
10591         u32 cursize, val, magic;
10592
10593         tp->nvram_size = EEPROM_CHIP_SIZE;
10594
10595         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10596                 return;
10597
10598         if ((magic != TG3_EEPROM_MAGIC) &&
10599             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10600             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10601                 return;
10602
10603         /*
10604          * Size the chip by reading offsets at increasing powers of two.
10605          * When we encounter our validation signature, we know the addressing
10606          * has wrapped around, and thus have our chip size.
10607          */
10608         cursize = 0x10;
10609
10610         while (cursize < tp->nvram_size) {
10611                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10612                         return;
10613
10614                 if (val == magic)
10615                         break;
10616
10617                 cursize <<= 1;
10618         }
10619
10620         tp->nvram_size = cursize;
10621 }
10622
10623 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10624 {
10625         u32 val;
10626
10627         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10628                 return;
10629
10630         /* Selfboot format */
10631         if (val != TG3_EEPROM_MAGIC) {
10632                 tg3_get_eeprom_size(tp);
10633                 return;
10634         }
10635
10636         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10637                 if (val != 0) {
10638                         tp->nvram_size = (val >> 16) * 1024;
10639                         return;
10640                 }
10641         }
10642         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10643 }
10644
10645 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10646 {
10647         u32 nvcfg1;
10648
10649         nvcfg1 = tr32(NVRAM_CFG1);
10650         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10651                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10652         }
10653         else {
10654                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10655                 tw32(NVRAM_CFG1, nvcfg1);
10656         }
10657
10658         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10659             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10660                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10661                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10662                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10663                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10664                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10665                                 break;
10666                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10667                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10668                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10669                                 break;
10670                         case FLASH_VENDOR_ATMEL_EEPROM:
10671                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10672                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10673                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10674                                 break;
10675                         case FLASH_VENDOR_ST:
10676                                 tp->nvram_jedecnum = JEDEC_ST;
10677                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10678                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10679                                 break;
10680                         case FLASH_VENDOR_SAIFUN:
10681                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10682                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10683                                 break;
10684                         case FLASH_VENDOR_SST_SMALL:
10685                         case FLASH_VENDOR_SST_LARGE:
10686                                 tp->nvram_jedecnum = JEDEC_SST;
10687                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10688                                 break;
10689                 }
10690         }
10691         else {
10692                 tp->nvram_jedecnum = JEDEC_ATMEL;
10693                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10694                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10695         }
10696 }
10697
10698 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10699 {
10700         u32 nvcfg1;
10701
10702         nvcfg1 = tr32(NVRAM_CFG1);
10703
10704         /* NVRAM protection for TPM */
10705         if (nvcfg1 & (1 << 27))
10706                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10707
10708         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10709                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10710                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10711                         tp->nvram_jedecnum = JEDEC_ATMEL;
10712                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10713                         break;
10714                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10715                         tp->nvram_jedecnum = JEDEC_ATMEL;
10716                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10717                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10718                         break;
10719                 case FLASH_5752VENDOR_ST_M45PE10:
10720                 case FLASH_5752VENDOR_ST_M45PE20:
10721                 case FLASH_5752VENDOR_ST_M45PE40:
10722                         tp->nvram_jedecnum = JEDEC_ST;
10723                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10724                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10725                         break;
10726         }
10727
10728         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10729                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10730                         case FLASH_5752PAGE_SIZE_256:
10731                                 tp->nvram_pagesize = 256;
10732                                 break;
10733                         case FLASH_5752PAGE_SIZE_512:
10734                                 tp->nvram_pagesize = 512;
10735                                 break;
10736                         case FLASH_5752PAGE_SIZE_1K:
10737                                 tp->nvram_pagesize = 1024;
10738                                 break;
10739                         case FLASH_5752PAGE_SIZE_2K:
10740                                 tp->nvram_pagesize = 2048;
10741                                 break;
10742                         case FLASH_5752PAGE_SIZE_4K:
10743                                 tp->nvram_pagesize = 4096;
10744                                 break;
10745                         case FLASH_5752PAGE_SIZE_264:
10746                                 tp->nvram_pagesize = 264;
10747                                 break;
10748                 }
10749         }
10750         else {
10751                 /* For eeprom, set pagesize to maximum eeprom size */
10752                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10753
10754                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10755                 tw32(NVRAM_CFG1, nvcfg1);
10756         }
10757 }
10758
10759 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10760 {
10761         u32 nvcfg1, protect = 0;
10762
10763         nvcfg1 = tr32(NVRAM_CFG1);
10764
10765         /* NVRAM protection for TPM */
10766         if (nvcfg1 & (1 << 27)) {
10767                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10768                 protect = 1;
10769         }
10770
10771         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10772         switch (nvcfg1) {
10773                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10774                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10775                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10776                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10777                         tp->nvram_jedecnum = JEDEC_ATMEL;
10778                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10779                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10780                         tp->nvram_pagesize = 264;
10781                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10782                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10783                                 tp->nvram_size = (protect ? 0x3e200 :
10784                                                   TG3_NVRAM_SIZE_512KB);
10785                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10786                                 tp->nvram_size = (protect ? 0x1f200 :
10787                                                   TG3_NVRAM_SIZE_256KB);
10788                         else
10789                                 tp->nvram_size = (protect ? 0x1f200 :
10790                                                   TG3_NVRAM_SIZE_128KB);
10791                         break;
10792                 case FLASH_5752VENDOR_ST_M45PE10:
10793                 case FLASH_5752VENDOR_ST_M45PE20:
10794                 case FLASH_5752VENDOR_ST_M45PE40:
10795                         tp->nvram_jedecnum = JEDEC_ST;
10796                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10797                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10798                         tp->nvram_pagesize = 256;
10799                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10800                                 tp->nvram_size = (protect ?
10801                                                   TG3_NVRAM_SIZE_64KB :
10802                                                   TG3_NVRAM_SIZE_128KB);
10803                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10804                                 tp->nvram_size = (protect ?
10805                                                   TG3_NVRAM_SIZE_64KB :
10806                                                   TG3_NVRAM_SIZE_256KB);
10807                         else
10808                                 tp->nvram_size = (protect ?
10809                                                   TG3_NVRAM_SIZE_128KB :
10810                                                   TG3_NVRAM_SIZE_512KB);
10811                         break;
10812         }
10813 }
10814
10815 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10816 {
10817         u32 nvcfg1;
10818
10819         nvcfg1 = tr32(NVRAM_CFG1);
10820
10821         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10822                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10823                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10824                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10825                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10826                         tp->nvram_jedecnum = JEDEC_ATMEL;
10827                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10828                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10829
10830                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10831                         tw32(NVRAM_CFG1, nvcfg1);
10832                         break;
10833                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10834                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10835                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10836                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10837                         tp->nvram_jedecnum = JEDEC_ATMEL;
10838                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10839                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10840                         tp->nvram_pagesize = 264;
10841                         break;
10842                 case FLASH_5752VENDOR_ST_M45PE10:
10843                 case FLASH_5752VENDOR_ST_M45PE20:
10844                 case FLASH_5752VENDOR_ST_M45PE40:
10845                         tp->nvram_jedecnum = JEDEC_ST;
10846                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10847                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10848                         tp->nvram_pagesize = 256;
10849                         break;
10850         }
10851 }
10852
10853 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10854 {
10855         u32 nvcfg1, protect = 0;
10856
10857         nvcfg1 = tr32(NVRAM_CFG1);
10858
10859         /* NVRAM protection for TPM */
10860         if (nvcfg1 & (1 << 27)) {
10861                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10862                 protect = 1;
10863         }
10864
10865         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10866         switch (nvcfg1) {
10867                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10868                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10869                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10870                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10871                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10872                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10873                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10874                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10875                         tp->nvram_jedecnum = JEDEC_ATMEL;
10876                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10877                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10878                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10879                         tp->nvram_pagesize = 256;
10880                         break;
10881                 case FLASH_5761VENDOR_ST_A_M45PE20:
10882                 case FLASH_5761VENDOR_ST_A_M45PE40:
10883                 case FLASH_5761VENDOR_ST_A_M45PE80:
10884                 case FLASH_5761VENDOR_ST_A_M45PE16:
10885                 case FLASH_5761VENDOR_ST_M_M45PE20:
10886                 case FLASH_5761VENDOR_ST_M_M45PE40:
10887                 case FLASH_5761VENDOR_ST_M_M45PE80:
10888                 case FLASH_5761VENDOR_ST_M_M45PE16:
10889                         tp->nvram_jedecnum = JEDEC_ST;
10890                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10891                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10892                         tp->nvram_pagesize = 256;
10893                         break;
10894         }
10895
10896         if (protect) {
10897                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10898         } else {
10899                 switch (nvcfg1) {
10900                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10901                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10902                         case FLASH_5761VENDOR_ST_A_M45PE16:
10903                         case FLASH_5761VENDOR_ST_M_M45PE16:
10904                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10905                                 break;
10906                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10907                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10908                         case FLASH_5761VENDOR_ST_A_M45PE80:
10909                         case FLASH_5761VENDOR_ST_M_M45PE80:
10910                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10911                                 break;
10912                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10913                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10914                         case FLASH_5761VENDOR_ST_A_M45PE40:
10915                         case FLASH_5761VENDOR_ST_M_M45PE40:
10916                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10917                                 break;
10918                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10919                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10920                         case FLASH_5761VENDOR_ST_A_M45PE20:
10921                         case FLASH_5761VENDOR_ST_M_M45PE20:
10922                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10923                                 break;
10924                 }
10925         }
10926 }
10927
10928 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10929 {
10930         tp->nvram_jedecnum = JEDEC_ATMEL;
10931         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10932         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10933 }
10934
/* Decode NVRAM_CFG1 for 57780-class chips: record the JEDEC vendor,
 * buffered/flash flags, page size and, where the strap encodes it,
 * the total NVRAM size.  An unrecognized vendor strap leaves tp as-is.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compatibility bypass and return
		 * early, skipping the flash page-size decode below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		/* Size is determined by the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		/* Size is determined by the specific ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		return;
	}

	/* Flash parts only: record the page size.  Power-of-two page
	 * sizes also set NO_NVRAM_ADDR_TRANS (linear addressing); the
	 * 264/528-byte pages use the translated page<<POS addressing.
	 */
	switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
11030
11031 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11032 static void __devinit tg3_nvram_init(struct tg3 *tp)
11033 {
11034         tw32_f(GRC_EEPROM_ADDR,
11035              (EEPROM_ADDR_FSM_RESET |
11036               (EEPROM_DEFAULT_CLOCK_PERIOD <<
11037                EEPROM_ADDR_CLKPERD_SHIFT)));
11038
11039         msleep(1);
11040
11041         /* Enable seeprom accesses. */
11042         tw32_f(GRC_LOCAL_CTRL,
11043              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11044         udelay(100);
11045
11046         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11047             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11048                 tp->tg3_flags |= TG3_FLAG_NVRAM;
11049
11050                 if (tg3_nvram_lock(tp)) {
11051                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
11052                                "tg3_nvram_init failed.\n", tp->dev->name);
11053                         return;
11054                 }
11055                 tg3_enable_nvram_access(tp);
11056
11057                 tp->nvram_size = 0;
11058
11059                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11060                         tg3_get_5752_nvram_info(tp);
11061                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11062                         tg3_get_5755_nvram_info(tp);
11063                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11064                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11065                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11066                         tg3_get_5787_nvram_info(tp);
11067                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11068                         tg3_get_5761_nvram_info(tp);
11069                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11070                         tg3_get_5906_nvram_info(tp);
11071                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11072                         tg3_get_57780_nvram_info(tp);
11073                 else
11074                         tg3_get_nvram_info(tp);
11075
11076                 if (tp->nvram_size == 0)
11077                         tg3_get_nvram_size(tp);
11078
11079                 tg3_disable_nvram_access(tp);
11080                 tg3_nvram_unlock(tp);
11081
11082         } else {
11083                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11084
11085                 tg3_get_eeprom_size(tp);
11086         }
11087 }
11088
/* Read one 32-bit word at @offset through the legacy serial-EEPROM
 * state machine (used when the chip has no NVRAM interface).
 * Returns 0 on success, -EINVAL for a misaligned or out-of-range
 * offset, -EBUSY if the FSM never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	/* Offsets must be dword aligned and within the address field. */
	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for up to ~1 second for the FSM to finish. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
11122
/* Max 10us polls to wait for NVRAM_CMD_DONE in tg3_nvram_exec_cmd(). */
#define NVRAM_CMD_TIMEOUT 10000
11124
11125 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
11126 {
11127         int i;
11128
11129         tw32(NVRAM_CMD, nvram_cmd);
11130         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
11131                 udelay(10);
11132                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
11133                         udelay(10);
11134                         break;
11135                 }
11136         }
11137         if (i == NVRAM_CMD_TIMEOUT) {
11138                 return -EBUSY;
11139         }
11140         return 0;
11141 }
11142
11143 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
11144 {
11145         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11146             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11147             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
11148            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
11149             (tp->nvram_jedecnum == JEDEC_ATMEL))
11150
11151                 addr = ((addr / tp->nvram_pagesize) <<
11152                         ATMEL_AT45DB0X1B_PAGE_POS) +
11153                        (addr % tp->nvram_pagesize);
11154
11155         return addr;
11156 }
11157
11158 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
11159 {
11160         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11161             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11162             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
11163            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
11164             (tp->nvram_jedecnum == JEDEC_ATMEL))
11165
11166                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11167                         tp->nvram_pagesize) +
11168                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11169
11170         return addr;
11171 }
11172
/* Read one 32-bit NVRAM word at @offset.  Falls back to the legacy
 * EEPROM interface on chips without NVRAM.  Takes and releases the
 * NVRAM hardware lock around the access.  Returns 0 or negative errno;
 * *val is written only on success.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Convert linear offset to the flash part's native addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
11204
11205 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11206 {
11207         u32 v;
11208         int res = tg3_nvram_read(tp, offset, &v);
11209         if (!res)
11210                 *val = cpu_to_le32(v);
11211         return res;
11212 }
11213
11214 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11215 {
11216         int err;
11217         u32 tmp;
11218
11219         err = tg3_nvram_read(tp, offset, &tmp);
11220         *val = swab32(tmp);
11221         return err;
11222 }
11223
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time via the EEPROM FSM.  @offset and @len are
 * expected dword aligned.  Returns 0 on success or -EBUSY if a word
 * write never completes (remaining words are then skipped).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* NOTE(review): writing COMPLETE back appears to clear a
		 * stale completion bit before starting — confirm against
		 * the register spec.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
11266
/* offset and length are dword aligned.
 * Write to unbuffered flash via read-modify-write of whole pages:
 * read the page, merge the caller's bytes, erase, then reprogram.
 * Returns 0 or a negative errno; the part is left write-disabled
 * on all paths.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one flash page image. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the whole page containing @offset into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Reprogram the merged page, one dword at a time, marking
		 * the first and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error paths. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
11363
/* offset and length are dword aligned.
 * Write to buffered flash or plain EEPROM one dword at a time,
 * tagging page and transfer boundaries with FIRST/LAST command flags.
 * Returns 0 or the first failing tg3_nvram_exec_cmd() result.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the flash part's native addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at page or transfer start; LAST at page end ... */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ... and at the final word of the whole transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts (except on 5752 and 5755+ chips) need an
		 * explicit write-enable before each page's first word.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11414
/* offset and length are dword aligned.
 * Top-level NVRAM write entry point: lifts EEPROM write protection,
 * dispatches to the EEPROM / buffered / unbuffered writer, then
 * restores protection.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop GPIO_OUTPUT1 to lift write protection while we write
	 * (restored at the bottom).
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* NOTE(review): this early return skips the write-protect
		 * restore below — confirm whether that is intentional.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		/* Buffered path also handles non-flash (EEPROM-style)
		 * parts behind the NVRAM interface.
		 */
		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protection. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
11469
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID expected
 * on that board design.  A phy_id of 0 marks entries with no PHY ID
 * recorded (judging by the board names, mostly fiber/SX designs).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
11474
/* Known board designs keyed by PCI subsystem IDs; scanned linearly
 * by lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11512
11513 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11514 {
11515         int i;
11516
11517         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11518                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11519                      tp->pdev->subsystem_vendor) &&
11520                     (subsys_id_to_phy_id[i].subsys_devid ==
11521                      tp->pdev->subsystem_device))
11522                         return &subsys_id_to_phy_id[i];
11523         }
11524         return NULL;
11525 }
11526
/* Pull the hardware configuration (PHY id, LED mode, WOL/ASF/APE
 * flags, etc.) out of the NVRAM shadow data in chip SRAM, falling
 * back to safe defaults when the SRAM signature is absent.  Also
 * registers the device's wakeup capability/state at the end.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 reads its config from the VCPU shadow register instead
	 * of SRAM; handle it and skip the SRAM path entirely.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 is only read for these ASICs with a sane bootcode
		 * version (presumably the word is absent/invalid otherwise
		 * — confirm against bootcode docs).
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from its two SRAM-encoded halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* NOTE(review): these two Arima boards apparently
			 * set the WP strap without being write protected —
			 * confirm the rationale.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		/* RGMII in-band signaling knobs (5785 only; cfg4 stays 0
		 * elsewhere).
		 */
		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
done:
	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
	device_set_wakeup_enable(&tp->pdev->dev,
				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
}
11740
11741 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11742 {
11743         int i;
11744         u32 val;
11745
11746         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11747         tw32(OTP_CTRL, cmd);
11748
11749         /* Wait for up to 1 ms for command to execute. */
11750         for (i = 0; i < 100; i++) {
11751                 val = tr32(OTP_STATUS);
11752                 if (val & OTP_STATUS_CMD_DONE)
11753                         break;
11754                 udelay(10);
11755         }
11756
11757         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11758 }
11759
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns 0 on any OTP command failure (callers must treat 0 as
 * "no valid OTP phy config").
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: the word holding the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: the adjacent word holding the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low 16 bits of the first word become the result's top half;
	 * high 16 bits of the second word become the bottom half. */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11789
/* Identify the PHY attached to the chip, record its ID and copper/SerDes
 * type in @tp, and — for copper PHYs not owned by management firmware —
 * ensure the link is up or autonegotiation is restarted with the full
 * advertisement.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* When phylib manages the PHY, defer entirely to tg3_phy_init(). */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into tg3's internal
		 * PHY ID layout. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is the one known-ID part that is a SerDes PHY. */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY not owned by ASF/APE firmware: check link state and
	 * (re)start autoneg with full advertisement if needed. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current link state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 chips must run as master in
			 * gigabit mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): these two writes repeat the advertisement
		 * setup from the branch above unconditionally — presumably
		 * to re-apply it after tg3_phy_set_wirespeed(); confirm
		 * before removing. */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): duplicate of the block above — whenever the first
	 * init succeeds (err == 0) the DSP init runs a second time.
	 * Verify whether the double initialization is intentional. */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* SerDes parts advertise gigabit/fibre only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	/* 10/100-only parts must not advertise gigabit modes. */
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11920
/* Extract the board part number from the Vital Product Data (VPD) —
 * read either from NVRAM or through the PCI VPD capability — and store
 * it in tp->board_part_number.  Falls back to a default string when no
 * part number can be found.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Valid EEPROM image: VPD lives at NVRAM offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			/* Unpack the 32-bit word byte by byte, LSB first. */
			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No EEPROM signature: fetch VPD through the PCI VPD
		 * capability instead.
		 * NOTE(review): vpd_cap is not checked for 0 (capability
		 * absent) before use — confirm all supported devices
		 * expose a VPD capability on this path. */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll (up to 100 x 1 ms) for bit 15 of VPD_ADDR,
			 * which signals the read data is ready. */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 (identifier string) and 0x91 (read-write resource)
		 * tags are skipped; a 2-byte little-endian length follows
		 * the tag byte. */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than the read-only resource tag (0x90)
		 * at this point means the VPD is malformed. */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Scan keyword/length/value triples looking for "PN". */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound the copy by both the destination
				 * (assumed >= 24 bytes, per the check)
				 * and the VPD buffer. */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
12021
12022 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12023 {
12024         u32 val;
12025
12026         if (tg3_nvram_read_swab(tp, offset, &val) ||
12027             (val & 0xfc000000) != 0x0c000000 ||
12028             tg3_nvram_read_swab(tp, offset + 4, &val) ||
12029             val != 0)
12030                 return 0;
12031
12032         return 1;
12033 }
12034
/* Decode the self-boot firmware version from NVRAM into tp->fw_ver as
 * "sb v<major>.<minor><build-letter>".  @val is the NVRAM signature word
 * already read by the caller; on any failure tp->fw_ver is left as "sb".
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	tp->fw_ver[0] = 's';
	tp->fw_ver[1] = 'b';
	tp->fw_ver[2] = '\0';

	/* Only format-1 self-boot images carry a decodable version. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The version (EDH) word sits at a revision-specific offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just "sb". */
		return;
	}

	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Limit minor to two digits and build to the range 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	/* NOTE(review): the hard-coded 30-byte bound and the fixed [8]/[9]
	 * indices below assume a single-digit major and a sufficiently
	 * large fw_ver — verify against TG3_VER_SIZE. */
	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);

	/* Builds are encoded as letters: 1 -> 'a', 2 -> 'b', ... */
	if (build > 0) {
		tp->fw_ver[8] = 'a' + build - 1;
		tp->fw_ver[9] = '\0';
	}
}
12079
/* Build the firmware version string in tp->fw_ver from NVRAM: the
 * bootcode version, then — when ASF firmware is in charge (ASF enabled
 * and no APE) — the ASF firmware version appended after ", ".
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC) {
		/* Self-boot images use their own version format. */
		if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
			tg3_read_sb_ver(tp, val);

		return;
	}

	/* Word 0xc: bootcode image offset; word 0x4: image start
	 * (load) address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image's load address. */
	offset = offset + ver_offset - start;
	/* Copy up to 16 bytes of bootcode version string. */
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF version when ASF firmware owns the NIC. */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory looking for the ASF init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; later chips store it
	 * in the directory entry preceding this one. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes of ASF version, truncating at the end
	 * of the fw_ver buffer. */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Not enough room for a full word: copy the remainder
		 * and stop. */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination after a possible truncation. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
12167
12168 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12169
12170 static int __devinit tg3_get_invariants(struct tg3 *tp)
12171 {
12172         static struct pci_device_id write_reorder_chipsets[] = {
12173                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12174                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12175                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12176                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12177                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12178                              PCI_DEVICE_ID_VIA_8385_0) },
12179                 { },
12180         };
12181         u32 misc_ctrl_reg;
12182         u32 cacheline_sz_reg;
12183         u32 pci_state_reg, grc_misc_cfg;
12184         u32 val;
12185         u16 pci_cmd;
12186         int err;
12187
12188         /* Force memory write invalidate off.  If we leave it on,
12189          * then on 5700_BX chips we have to enable a workaround.
12190          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12191          * to match the cacheline size.  The Broadcom driver have this
12192          * workaround but turns MWI off all the times so never uses
12193          * it.  This seems to suggest that the workaround is insufficient.
12194          */
12195         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12196         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12197         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12198
12199         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12200          * has the register indirect write enable bit set before
12201          * we try to access any of the MMIO registers.  It is also
12202          * critical that the PCI-X hw workaround situation is decided
12203          * before that as well.
12204          */
12205         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12206                               &misc_ctrl_reg);
12207
12208         tp->pci_chip_rev_id = (misc_ctrl_reg >>
12209                                MISC_HOST_CTRL_CHIPREV_SHIFT);
12210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12211                 u32 prod_id_asic_rev;
12212
12213                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12214                                       &prod_id_asic_rev);
12215                 tp->pci_chip_rev_id = prod_id_asic_rev;
12216         }
12217
12218         /* Wrong chip ID in 5752 A0. This code can be removed later
12219          * as A0 is not in production.
12220          */
12221         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12222                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12223
12224         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12225          * we need to disable memory and use config. cycles
12226          * only to access all registers. The 5702/03 chips
12227          * can mistakenly decode the special cycles from the
12228          * ICH chipsets as memory write cycles, causing corruption
12229          * of register and memory space. Only certain ICH bridges
12230          * will drive special cycles with non-zero data during the
12231          * address phase which can fall within the 5703's address
12232          * range. This is not an ICH bug as the PCI spec allows
12233          * non-zero address during special cycles. However, only
12234          * these ICH bridges are known to drive non-zero addresses
12235          * during special cycles.
12236          *
12237          * Since special cycles do not cross PCI bridges, we only
12238          * enable this workaround if the 5703 is on the secondary
12239          * bus of these ICH bridges.
12240          */
12241         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12242             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12243                 static struct tg3_dev_id {
12244                         u32     vendor;
12245                         u32     device;
12246                         u32     rev;
12247                 } ich_chipsets[] = {
12248                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12249                           PCI_ANY_ID },
12250                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12251                           PCI_ANY_ID },
12252                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12253                           0xa },
12254                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12255                           PCI_ANY_ID },
12256                         { },
12257                 };
12258                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12259                 struct pci_dev *bridge = NULL;
12260
12261                 while (pci_id->vendor != 0) {
12262                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
12263                                                 bridge);
12264                         if (!bridge) {
12265                                 pci_id++;
12266                                 continue;
12267                         }
12268                         if (pci_id->rev != PCI_ANY_ID) {
12269                                 if (bridge->revision > pci_id->rev)
12270                                         continue;
12271                         }
12272                         if (bridge->subordinate &&
12273                             (bridge->subordinate->number ==
12274                              tp->pdev->bus->number)) {
12275
12276                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12277                                 pci_dev_put(bridge);
12278                                 break;
12279                         }
12280                 }
12281         }
12282
12283         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12284                 static struct tg3_dev_id {
12285                         u32     vendor;
12286                         u32     device;
12287                 } bridge_chipsets[] = {
12288                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12289                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12290                         { },
12291                 };
12292                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12293                 struct pci_dev *bridge = NULL;
12294
12295                 while (pci_id->vendor != 0) {
12296                         bridge = pci_get_device(pci_id->vendor,
12297                                                 pci_id->device,
12298                                                 bridge);
12299                         if (!bridge) {
12300                                 pci_id++;
12301                                 continue;
12302                         }
12303                         if (bridge->subordinate &&
12304                             (bridge->subordinate->number <=
12305                              tp->pdev->bus->number) &&
12306                             (bridge->subordinate->subordinate >=
12307                              tp->pdev->bus->number)) {
12308                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12309                                 pci_dev_put(bridge);
12310                                 break;
12311                         }
12312                 }
12313         }
12314
12315         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12316          * DMA addresses > 40-bit. This bridge may have other additional
12317          * 57xx devices behind it in some 4-port NIC designs for example.
12318          * Any tg3 device found behind the bridge will also need the 40-bit
12319          * DMA workaround.
12320          */
12321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12323                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12324                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12325                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12326         }
12327         else {
12328                 struct pci_dev *bridge = NULL;
12329
12330                 do {
12331                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12332                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12333                                                 bridge);
12334                         if (bridge && bridge->subordinate &&
12335                             (bridge->subordinate->number <=
12336                              tp->pdev->bus->number) &&
12337                             (bridge->subordinate->subordinate >=
12338                              tp->pdev->bus->number)) {
12339                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12340                                 pci_dev_put(bridge);
12341                                 break;
12342                         }
12343                 } while (bridge);
12344         }
12345
12346         /* Initialize misc host control in PCI block. */
12347         tp->misc_host_ctrl |= (misc_ctrl_reg &
12348                                MISC_HOST_CTRL_CHIPREV);
12349         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12350                                tp->misc_host_ctrl);
12351
12352         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12353                               &cacheline_sz_reg);
12354
12355         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12356         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12357         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12358         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12359
12360         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12361             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12362                 tp->pdev_peer = tg3_find_peer(tp);
12363
12364         /* Intentionally exclude ASIC_REV_5906 */
12365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12366             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12367             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12368             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12369             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12371                 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12372
12373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12374             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12376             (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12377             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12378                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12379
12380         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12381             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12382                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12383
12384         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12385                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12386                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12387                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12388                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12389                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12390                      tp->pdev_peer == tp->pdev))
12391                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12392
12393                 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12394                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12395                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12396                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12397                 } else {
12398                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12399                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12400                                 ASIC_REV_5750 &&
12401                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12402                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12403                 }
12404         }
12405
12406         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12407              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12408                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12409
12410         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12411                               &pci_state_reg);
12412
12413         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12414         if (tp->pcie_cap != 0) {
12415                 u16 lnkctl;
12416
12417                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12418
12419                 pcie_set_readrq(tp->pdev, 4096);
12420
12421                 pci_read_config_word(tp->pdev,
12422                                      tp->pcie_cap + PCI_EXP_LNKCTL,
12423                                      &lnkctl);
12424                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12425                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12426                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12427                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12428                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12429                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12430                                 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12431                 }
12432         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12433                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12434         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12435                    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12436                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12437                 if (!tp->pcix_cap) {
12438                         printk(KERN_ERR PFX "Cannot find PCI-X "
12439                                             "capability, aborting.\n");
12440                         return -EIO;
12441                 }
12442
12443                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12444                         tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12445         }
12446
12447         /* If we have an AMD 762 or VIA K8T800 chipset, write
12448          * reordering to the mailbox registers done by the host
12449          * controller can cause major troubles.  We read back from
12450          * every mailbox register write to force the writes to be
12451          * posted to the chip in order.
12452          */
12453         if (pci_dev_present(write_reorder_chipsets) &&
12454             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12455                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12456
12457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12458             tp->pci_lat_timer < 64) {
12459                 tp->pci_lat_timer = 64;
12460
12461                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12462                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12463                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12464                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12465
12466                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12467                                        cacheline_sz_reg);
12468         }
12469
12470         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12471                 /* 5700 BX chips need to have their TX producer index
12472                  * mailboxes written twice to workaround a bug.
12473                  */
12474                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12475
12476                 /* If we are in PCI-X mode, enable register write workaround.
12477                  *
12478                  * The workaround is to use indirect register accesses
12479                  * for all chip writes not to mailbox registers.
12480                  */
12481                 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12482                         u32 pm_reg;
12483
12484                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12485
12486                         /* The chip can have it's power management PCI config
12487                          * space registers clobbered due to this bug.
12488                          * So explicitly force the chip into D0 here.
12489                          */
12490                         pci_read_config_dword(tp->pdev,
12491                                               tp->pm_cap + PCI_PM_CTRL,
12492                                               &pm_reg);
12493                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12494                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12495                         pci_write_config_dword(tp->pdev,
12496                                                tp->pm_cap + PCI_PM_CTRL,
12497                                                pm_reg);
12498
12499                         /* Also, force SERR#/PERR# in PCI command. */
12500                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12501                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12502                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12503                 }
12504         }
12505
12506         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12507                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12508         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12509                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12510
12511         /* Chip-specific fixup from Broadcom driver */
12512         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12513             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12514                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12515                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12516         }
12517
12518         /* Default fast path register access methods */
12519         tp->read32 = tg3_read32;
12520         tp->write32 = tg3_write32;
12521         tp->read32_mbox = tg3_read32;
12522         tp->write32_mbox = tg3_write32;
12523         tp->write32_tx_mbox = tg3_write32;
12524         tp->write32_rx_mbox = tg3_write32;
12525
12526         /* Various workaround register access methods */
12527         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12528                 tp->write32 = tg3_write_indirect_reg32;
12529         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12530                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12531                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12532                 /*
12533                  * Back to back register writes can cause problems on these
12534                  * chips, the workaround is to read back all reg writes
12535                  * except those to mailbox regs.
12536                  *
12537                  * See tg3_write_indirect_reg32().
12538                  */
12539                 tp->write32 = tg3_write_flush_reg32;
12540         }
12541
12542
12543         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12544             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12545                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12546                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12547                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12548         }
12549
12550         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12551                 tp->read32 = tg3_read_indirect_reg32;
12552                 tp->write32 = tg3_write_indirect_reg32;
12553                 tp->read32_mbox = tg3_read_indirect_mbox;
12554                 tp->write32_mbox = tg3_write_indirect_mbox;
12555                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12556                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12557
12558                 iounmap(tp->regs);
12559                 tp->regs = NULL;
12560
12561                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12562                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12563                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12564         }
12565         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12566                 tp->read32_mbox = tg3_read32_mbox_5906;
12567                 tp->write32_mbox = tg3_write32_mbox_5906;
12568                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12569                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12570         }
12571
12572         if (tp->write32 == tg3_write_indirect_reg32 ||
12573             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12574              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12575               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12576                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12577
12578         /* Get eeprom hw config before calling tg3_set_power_state().
12579          * In particular, the TG3_FLG2_IS_NIC flag must be
12580          * determined before calling tg3_set_power_state() so that
12581          * we know whether or not to switch out of Vaux power.
12582          * When the flag is set, it means that GPIO1 is used for eeprom
12583          * write protect and also implies that it is a LOM where GPIOs
12584          * are not used to switch power.
12585          */
12586         tg3_get_eeprom_hw_cfg(tp);
12587
12588         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12589                 /* Allow reads and writes to the
12590                  * APE register and memory space.
12591                  */
12592                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12593                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12594                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12595                                        pci_state_reg);
12596         }
12597
12598         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12599             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12600             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12601             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12602                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12603
12604         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12605          * GPIO1 driven high will bring 5700's external PHY out of reset.
12606          * It is also used as eeprom write protect on LOMs.
12607          */
12608         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12609         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12610             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12611                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12612                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12613         /* Unused GPIO3 must be driven as output on 5752 because there
12614          * are no pull-up resistors on unused GPIO pins.
12615          */
12616         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12617                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12618
12619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12620             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12621                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12622
12623         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12624                 /* Turn off the debug UART. */
12625                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12626                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12627                         /* Keep VMain power. */
12628                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12629                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12630         }
12631
12632         /* Force the chip into D0. */
12633         err = tg3_set_power_state(tp, PCI_D0);
12634         if (err) {
12635                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12636                        pci_name(tp->pdev));
12637                 return err;
12638         }
12639
12640         /* 5700 B0 chips do not support checksumming correctly due
12641          * to hardware bugs.
12642          */
12643         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12644                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12645
12646         /* Derive initial jumbo mode from MTU assigned in
12647          * ether_setup() via the alloc_etherdev() call
12648          */
12649         if (tp->dev->mtu > ETH_DATA_LEN &&
12650             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12651                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12652
12653         /* Determine WakeOnLan speed to use. */
12654         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12655             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12656             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12657             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12658                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12659         } else {
12660                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12661         }
12662
12663         /* A few boards don't want Ethernet@WireSpeed phy feature */
12664         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12665             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12666              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12667              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12668             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12669             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12670                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12671
12672         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12673             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12674                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12675         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12676                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12677
12678         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12679             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12680             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12681             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12682                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12683                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12684                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12685                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12686                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12687                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12688                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12689                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12690                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12691                 } else
12692                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12693         }
12694
12695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12696             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12697                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12698                 if (tp->phy_otp == 0)
12699                         tp->phy_otp = TG3_OTP_DEFAULT;
12700         }
12701
12702         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12703                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12704         else
12705                 tp->mi_mode = MAC_MI_MODE_BASE;
12706
12707         tp->coalesce_mode = 0;
12708         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12709             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12710                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12711
12712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12713             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12714                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12715
12716         err = tg3_mdio_init(tp);
12717         if (err)
12718                 return err;
12719
12720         /* Initialize data/descriptor byte/word swapping. */
12721         val = tr32(GRC_MODE);
12722         val &= GRC_MODE_HOST_STACKUP;
12723         tw32(GRC_MODE, val | tp->grc_mode);
12724
12725         tg3_switch_clocks(tp);
12726
12727         /* Clear this out for sanity. */
12728         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12729
12730         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12731                               &pci_state_reg);
12732         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12733             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12734                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12735
12736                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12737                     chiprevid == CHIPREV_ID_5701_B0 ||
12738                     chiprevid == CHIPREV_ID_5701_B2 ||
12739                     chiprevid == CHIPREV_ID_5701_B5) {
12740                         void __iomem *sram_base;
12741
12742                         /* Write some dummy words into the SRAM status block
12743                          * area, see if it reads back correctly.  If the return
12744                          * value is bad, force enable the PCIX workaround.
12745                          */
12746                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12747
12748                         writel(0x00000000, sram_base);
12749                         writel(0x00000000, sram_base + 4);
12750                         writel(0xffffffff, sram_base + 4);
12751                         if (readl(sram_base) != 0x00000000)
12752                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12753                 }
12754         }
12755
12756         udelay(50);
12757         tg3_nvram_init(tp);
12758
12759         grc_misc_cfg = tr32(GRC_MISC_CFG);
12760         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12761
12762         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12763             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12764              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12765                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12766
12767         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12768             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12769                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12770         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12771                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12772                                       HOSTCC_MODE_CLRTICK_TXBD);
12773
12774                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12775                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12776                                        tp->misc_host_ctrl);
12777         }
12778
12779         /* Preserve the APE MAC_MODE bits */
12780         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12781                 tp->mac_mode = tr32(MAC_MODE) |
12782                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12783         else
12784                 tp->mac_mode = TG3_DEF_MAC_MODE;
12785
12786         /* these are limited to 10/100 only */
12787         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12788              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12789             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12790              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12791              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12792               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12793               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12794             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12795              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12796               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12797               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12798             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12799             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12800                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12801
12802         err = tg3_phy_probe(tp);
12803         if (err) {
12804                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12805                        pci_name(tp->pdev), err);
12806                 /* ... but do not return immediately ... */
12807                 tg3_mdio_fini(tp);
12808         }
12809
12810         tg3_read_partno(tp);
12811         tg3_read_fw_ver(tp);
12812
12813         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12814                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12815         } else {
12816                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12817                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12818                 else
12819                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12820         }
12821
12822         /* 5700 {AX,BX} chips have a broken status block link
12823          * change bit implementation, so we must use the
12824          * status register in those cases.
12825          */
12826         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12827                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12828         else
12829                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12830
12831         /* The led_ctrl is set during tg3_phy_probe, here we might
12832          * have to force the link status polling mechanism based
12833          * upon subsystem IDs.
12834          */
12835         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12836             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12837             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12838                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12839                                   TG3_FLAG_USE_LINKCHG_REG);
12840         }
12841
12842         /* For all SERDES we poll the MAC status register. */
12843         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12844                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12845         else
12846                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12847
12848         tp->rx_offset = NET_IP_ALIGN;
12849         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12850             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12851                 tp->rx_offset = 0;
12852
12853         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12854
12855         /* Increment the rx prod index on the rx std ring by at most
12856          * 8 for these chips to workaround hw errata.
12857          */
12858         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12859             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12860             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12861                 tp->rx_std_max_post = 8;
12862
12863         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12864                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12865                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12866
12867         return err;
12868 }
12869
12870 #ifdef CONFIG_SPARC
12871 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12872 {
12873         struct net_device *dev = tp->dev;
12874         struct pci_dev *pdev = tp->pdev;
12875         struct device_node *dp = pci_device_to_OF_node(pdev);
12876         const unsigned char *addr;
12877         int len;
12878
12879         addr = of_get_property(dp, "local-mac-address", &len);
12880         if (addr && len == 6) {
12881                 memcpy(dev->dev_addr, addr, 6);
12882                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12883                 return 0;
12884         }
12885         return -ENODEV;
12886 }
12887
12888 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12889 {
12890         struct net_device *dev = tp->dev;
12891
12892         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12893         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12894         return 0;
12895 }
12896 #endif
12897
/* Determine the device's permanent MAC address, trying sources in
 * decreasing order of preference:
 *   1. OpenFirmware device-tree property (SPARC only)
 *   2. the bootcode's MAC address mailbox in NIC SRAM
 *   3. NVRAM at a chip-specific offset
 *   4. the live MAC_ADDR_0 hardware registers
 *   5. the system IDPROM (SPARC only)
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset of the MAC address for this chip. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts store the second function's address
		 * at 0xcc instead of the default 0x7c.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): on lock failure the NVRAM state machine
		 * is reset rather than unlocked — presumably to recover
		 * a wedged arbiter before the NVRAM reads below; confirm
		 * against bootcode expectations.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is the signature the bootcode leaves in the upper
	 * half of the high word when the mailbox holds a valid address.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte order differs from the
		 * SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	/* Nothing usable from the hardware: fall back to the IDPROM on
	 * SPARC, otherwise give up.
	 */
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12972
12973 #define BOUNDARY_SINGLE_CACHELINE       1
12974 #define BOUNDARY_MULTI_CACHELINE        2
12975
/* Fold the appropriate DMA read/write boundary bits into @val (the
 * DMA_RWCTRL register image) based on the host cache line size, the
 * bus type (PCI / PCI-X / PCI Express) and the host architecture.
 *
 * @val: current DMA_RWCTRL value to be augmented
 *
 * Returns @val with boundary bits set (or unchanged when the chip or
 * architecture does not need them).
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; 0 means the
	 * BIOS did not program it, so assume the worst case.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Architecture-specific burst policy: some RISC PCI host
	 * controllers disconnect on cache-line-crossing bursts.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary encodings are 128, 256 or 384 bytes. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only write-side boundaries exist (64 or 128). */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: match the boundary to the cache line
		 * when single-cacheline bursts are wanted; otherwise the
		 * fallthroughs land on the next larger supported size.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
13112
/* Perform one DMA transfer between the host test buffer and NIC SRAM
 * and poll the completion FIFO until the descriptor is reported done.
 *
 * @buf:       host virtual address of the test buffer (unused here;
 *             the chip reads/writes through @buf_dma)
 * @buf_dma:   bus address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: non-zero => host-to-NIC (read DMA engine),
 *             zero     => NIC-to-host (write DMA engine)
 *
 * Returns 0 when the transfer completes, -ENODEV if completion is not
 * seen within 40 polls of 100us each (~4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear stale completion FIFO entries and DMA engine status. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Describe the host buffer; 0x2100 is the NIC SRAM offset the
         * data is staged through during the test. */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Completion queue 13 / send queue 2: read DMA path. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Completion queue 16 / send queue 7: write DMA path. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the indirect PCI memory window, then close the window. */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick the selected DMA engine by enqueueing the descriptor. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the matching completion FIFO for our descriptor address. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
13193
13194 #define TEST_BUFFER_SIZE        0x2000
13195
/* Compute the DMA read/write control register value for this chip and
 * bus configuration, then (on 5700/5701 only) run a write/read loopback
 * DMA test through NIC SRAM to detect the write-DMA boundary bug.  If
 * corruption is observed, the write boundary is forced down to 16
 * bytes; a known host bridge (Apple UniNorth PCI) gets the same
 * workaround even when the test passes.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA proves unusable.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Baseline PCI write/read command codes. */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* Watermark bits depend on the bus type and chip revision. */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        /* 5703/5704: clear the low nibble (minimum DMA boundary bits). */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 exhibit the write-DMA bug; everything else is
         * done once the register value above is programmed. */
        ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Write the pattern to the chip, read it back, and verify;
         * retried once with the 16-byte boundary workaround applied
         * if corruption is detected. */
        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                /* First failure: apply the 16-byte write
                                 * boundary workaround and retry the test. */
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                /* Still corrupt with the workaround: give up. */
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
13382
13383 static void __devinit tg3_init_link_config(struct tg3 *tp)
13384 {
13385         tp->link_config.advertising =
13386                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13387                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13388                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13389                  ADVERTISED_Autoneg | ADVERTISED_MII);
13390         tp->link_config.speed = SPEED_INVALID;
13391         tp->link_config.duplex = DUPLEX_INVALID;
13392         tp->link_config.autoneg = AUTONEG_ENABLE;
13393         tp->link_config.active_speed = SPEED_INVALID;
13394         tp->link_config.active_duplex = DUPLEX_INVALID;
13395         tp->link_config.phy_is_low_power = 0;
13396         tp->link_config.orig_speed = SPEED_INVALID;
13397         tp->link_config.orig_duplex = DUPLEX_INVALID;
13398         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13399 }
13400
13401 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13402 {
13403         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13404                 tp->bufmgr_config.mbuf_read_dma_low_water =
13405                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13406                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13407                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13408                 tp->bufmgr_config.mbuf_high_water =
13409                         DEFAULT_MB_HIGH_WATER_5705;
13410                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13411                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13412                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13413                         tp->bufmgr_config.mbuf_high_water =
13414                                 DEFAULT_MB_HIGH_WATER_5906;
13415                 }
13416
13417                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13418                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13419                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13420                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13421                 tp->bufmgr_config.mbuf_high_water_jumbo =
13422                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13423         } else {
13424                 tp->bufmgr_config.mbuf_read_dma_low_water =
13425                         DEFAULT_MB_RDMA_LOW_WATER;
13426                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13427                         DEFAULT_MB_MACRX_LOW_WATER;
13428                 tp->bufmgr_config.mbuf_high_water =
13429                         DEFAULT_MB_HIGH_WATER;
13430
13431                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13432                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13433                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13434                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13435                 tp->bufmgr_config.mbuf_high_water_jumbo =
13436                         DEFAULT_MB_HIGH_WATER_JUMBO;
13437         }
13438
13439         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13440         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13441 }
13442
13443 static char * __devinit tg3_phy_string(struct tg3 *tp)
13444 {
13445         switch (tp->phy_id & PHY_ID_MASK) {
13446         case PHY_ID_BCM5400:    return "5400";
13447         case PHY_ID_BCM5401:    return "5401";
13448         case PHY_ID_BCM5411:    return "5411";
13449         case PHY_ID_BCM5701:    return "5701";
13450         case PHY_ID_BCM5703:    return "5703";
13451         case PHY_ID_BCM5704:    return "5704";
13452         case PHY_ID_BCM5705:    return "5705";
13453         case PHY_ID_BCM5750:    return "5750";
13454         case PHY_ID_BCM5752:    return "5752";
13455         case PHY_ID_BCM5714:    return "5714";
13456         case PHY_ID_BCM5780:    return "5780";
13457         case PHY_ID_BCM5755:    return "5755";
13458         case PHY_ID_BCM5787:    return "5787";
13459         case PHY_ID_BCM5784:    return "5784";
13460         case PHY_ID_BCM5756:    return "5722/5756";
13461         case PHY_ID_BCM5906:    return "5906";
13462         case PHY_ID_BCM5761:    return "5761";
13463         case PHY_ID_BCM8002:    return "8002/serdes";
13464         case 0:                 return "serdes";
13465         default:                return "unknown";
13466         }
13467 }
13468
13469 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13470 {
13471         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13472                 strcpy(str, "PCI Express");
13473                 return str;
13474         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13475                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13476
13477                 strcpy(str, "PCIX:");
13478
13479                 if ((clock_ctrl == 7) ||
13480                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13481                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13482                         strcat(str, "133MHz");
13483                 else if (clock_ctrl == 0)
13484                         strcat(str, "33MHz");
13485                 else if (clock_ctrl == 2)
13486                         strcat(str, "50MHz");
13487                 else if (clock_ctrl == 4)
13488                         strcat(str, "66MHz");
13489                 else if (clock_ctrl == 6)
13490                         strcat(str, "100MHz");
13491         } else {
13492                 strcpy(str, "PCI:");
13493                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13494                         strcat(str, "66MHz");
13495                 else
13496                         strcat(str, "33MHz");
13497         }
13498         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13499                 strcat(str, ":32-bit");
13500         else
13501                 strcat(str, ":64-bit");
13502         return str;
13503 }
13504
13505 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13506 {
13507         struct pci_dev *peer;
13508         unsigned int func, devnr = tp->pdev->devfn & ~7;
13509
13510         for (func = 0; func < 8; func++) {
13511                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13512                 if (peer && peer != tp->pdev)
13513                         break;
13514                 pci_dev_put(peer);
13515         }
13516         /* 5704 can be configured in single-port mode, set peer to
13517          * tp->pdev in that case.
13518          */
13519         if (!peer) {
13520                 peer = tp->pdev;
13521                 return peer;
13522         }
13523
13524         /*
13525          * We don't need to keep the refcount elevated; there's no way
13526          * to remove one half of this device without removing the other
13527          */
13528         pci_dev_put(peer);
13529
13530         return peer;
13531 }
13532
13533 static void __devinit tg3_init_coal(struct tg3 *tp)
13534 {
13535         struct ethtool_coalesce *ec = &tp->coal;
13536
13537         memset(ec, 0, sizeof(*ec));
13538         ec->cmd = ETHTOOL_GCOALESCE;
13539         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13540         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13541         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13542         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13543         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13544         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13545         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13546         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13547         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13548
13549         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13550                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13551                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13552                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13553                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13554                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13555         }
13556
13557         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13558                 ec->rx_coalesce_usecs_irq = 0;
13559                 ec->tx_coalesce_usecs_irq = 0;
13560                 ec->stats_block_coalesce_usecs = 0;
13561         }
13562 }
13563
/* net_device callbacks for chips that use the regular transmit path
 * (selected in tg3_init_one for 5755+ and 5906 devices). */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats          = tg3_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
#if TG3_VLAN_TAG_USED
        .ndo_vlan_rx_register   = tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
13582
/* net_device callbacks for older chips whose transmit DMA needs the
 * workaround path; identical to tg3_netdev_ops except that
 * .ndo_start_xmit points at tg3_start_xmit_dma_bug. */
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit_dma_bug,
        .ndo_get_stats          = tg3_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
#if TG3_VLAN_TAG_USED
        .ndo_vlan_rx_register   = tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
13601
13602 static int __devinit tg3_init_one(struct pci_dev *pdev,
13603                                   const struct pci_device_id *ent)
13604 {
13605         static int tg3_version_printed = 0;
13606         struct net_device *dev;
13607         struct tg3 *tp;
13608         int err, pm_cap;
13609         char str[40];
13610         u64 dma_mask, persist_dma_mask;
13611
13612         if (tg3_version_printed++ == 0)
13613                 printk(KERN_INFO "%s", version);
13614
13615         err = pci_enable_device(pdev);
13616         if (err) {
13617                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13618                        "aborting.\n");
13619                 return err;
13620         }
13621
13622         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13623         if (err) {
13624                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13625                        "aborting.\n");
13626                 goto err_out_disable_pdev;
13627         }
13628
13629         pci_set_master(pdev);
13630
13631         /* Find power-management capability. */
13632         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13633         if (pm_cap == 0) {
13634                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13635                        "aborting.\n");
13636                 err = -EIO;
13637                 goto err_out_free_res;
13638         }
13639
13640         dev = alloc_etherdev(sizeof(*tp));
13641         if (!dev) {
13642                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13643                 err = -ENOMEM;
13644                 goto err_out_free_res;
13645         }
13646
13647         SET_NETDEV_DEV(dev, &pdev->dev);
13648
13649 #if TG3_VLAN_TAG_USED
13650         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13651 #endif
13652
13653         tp = netdev_priv(dev);
13654         tp->pdev = pdev;
13655         tp->dev = dev;
13656         tp->pm_cap = pm_cap;
13657         tp->rx_mode = TG3_DEF_RX_MODE;
13658         tp->tx_mode = TG3_DEF_TX_MODE;
13659
13660         if (tg3_debug > 0)
13661                 tp->msg_enable = tg3_debug;
13662         else
13663                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13664
13665         /* The word/byte swap controls here control register access byte
13666          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13667          * setting below.
13668          */
13669         tp->misc_host_ctrl =
13670                 MISC_HOST_CTRL_MASK_PCI_INT |
13671                 MISC_HOST_CTRL_WORD_SWAP |
13672                 MISC_HOST_CTRL_INDIR_ACCESS |
13673                 MISC_HOST_CTRL_PCISTATE_RW;
13674
13675         /* The NONFRM (non-frame) byte/word swap controls take effect
13676          * on descriptor entries, anything which isn't packet data.
13677          *
13678          * The StrongARM chips on the board (one for tx, one for rx)
13679          * are running in big-endian mode.
13680          */
13681         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13682                         GRC_MODE_WSWAP_NONFRM_DATA);
13683 #ifdef __BIG_ENDIAN
13684         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13685 #endif
13686         spin_lock_init(&tp->lock);
13687         spin_lock_init(&tp->indirect_lock);
13688         INIT_WORK(&tp->reset_task, tg3_reset_task);
13689
13690         tp->regs = pci_ioremap_bar(pdev, BAR_0);
13691         if (!tp->regs) {
13692                 printk(KERN_ERR PFX "Cannot map device registers, "
13693                        "aborting.\n");
13694                 err = -ENOMEM;
13695                 goto err_out_free_dev;
13696         }
13697
13698         tg3_init_link_config(tp);
13699
13700         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13701         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13702         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13703
13704         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13705         dev->ethtool_ops = &tg3_ethtool_ops;
13706         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13707         dev->irq = pdev->irq;
13708
13709         err = tg3_get_invariants(tp);
13710         if (err) {
13711                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13712                        "aborting.\n");
13713                 goto err_out_iounmap;
13714         }
13715
13716         if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13717             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13718                 dev->netdev_ops = &tg3_netdev_ops;
13719         else
13720                 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13721
13722
13723         /* The EPB bridge inside 5714, 5715, and 5780 and any
13724          * device behind the EPB cannot support DMA addresses > 40-bit.
13725          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13726          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13727          * do DMA address check in tg3_start_xmit().
13728          */
13729         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13730                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13731         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13732                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13733 #ifdef CONFIG_HIGHMEM
13734                 dma_mask = DMA_64BIT_MASK;
13735 #endif
13736         } else
13737                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13738
13739         /* Configure DMA attributes. */
13740         if (dma_mask > DMA_32BIT_MASK) {
13741                 err = pci_set_dma_mask(pdev, dma_mask);
13742                 if (!err) {
13743                         dev->features |= NETIF_F_HIGHDMA;
13744                         err = pci_set_consistent_dma_mask(pdev,
13745                                                           persist_dma_mask);
13746                         if (err < 0) {
13747                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13748                                        "DMA for consistent allocations\n");
13749                                 goto err_out_iounmap;
13750                         }
13751                 }
13752         }
13753         if (err || dma_mask == DMA_32BIT_MASK) {
13754                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13755                 if (err) {
13756                         printk(KERN_ERR PFX "No usable DMA configuration, "
13757                                "aborting.\n");
13758                         goto err_out_iounmap;
13759                 }
13760         }
13761
13762         tg3_init_bufmgr_config(tp);
13763
13764         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13765                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13766         }
13767         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13768             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13769             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13770             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13771             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13772                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13773         } else {
13774                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13775         }
13776
13777         /* TSO is on by default on chips that support hardware TSO.
13778          * Firmware TSO on older chips gives lower performance, so it
13779          * is off by default, but can be enabled using ethtool.
13780          */
13781         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13782                 dev->features |= NETIF_F_TSO;
13783                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13784                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13785                         dev->features |= NETIF_F_TSO6;
13786                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13787                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13788                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13789                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13790                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13791                         dev->features |= NETIF_F_TSO_ECN;
13792         }
13793
13794
13795         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13796             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13797             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13798                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13799                 tp->rx_pending = 63;
13800         }
13801
13802         err = tg3_get_device_address(tp);
13803         if (err) {
13804                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13805                        "aborting.\n");
13806                 goto err_out_iounmap;
13807         }
13808
13809         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13810                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13811                 if (!tp->aperegs) {
13812                         printk(KERN_ERR PFX "Cannot map APE registers, "
13813                                "aborting.\n");
13814                         err = -ENOMEM;
13815                         goto err_out_iounmap;
13816                 }
13817
13818                 tg3_ape_lock_init(tp);
13819         }
13820
13821         /*
13822          * Reset chip in case UNDI or EFI driver did not shutdown
13823          * DMA self test will enable WDMAC and we'll see (spurious)
13824          * pending DMA on the PCI bus at that point.
13825          */
13826         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13827             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13828                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13829                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13830         }
13831
13832         err = tg3_test_dma(tp);
13833         if (err) {
13834                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13835                 goto err_out_apeunmap;
13836         }
13837
13838         /* Tigon3 can do ipv4 only... and some chips have buggy
13839          * checksumming.
13840          */
13841         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13842                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13843                 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13844                         dev->features |= NETIF_F_IPV6_CSUM;
13845
13846                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13847         } else
13848                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13849
13850         /* flow control autonegotiation is default behavior */
13851         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13852         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13853
13854         tg3_init_coal(tp);
13855
13856         pci_set_drvdata(pdev, dev);
13857
13858         err = register_netdev(dev);
13859         if (err) {
13860                 printk(KERN_ERR PFX "Cannot register net device, "
13861                        "aborting.\n");
13862                 goto err_out_apeunmap;
13863         }
13864
13865         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13866                dev->name,
13867                tp->board_part_number,
13868                tp->pci_chip_rev_id,
13869                tg3_bus_string(tp, str),
13870                dev->dev_addr);
13871
13872         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13873                 printk(KERN_INFO
13874                        "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13875                        tp->dev->name,
13876                        tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13877                        dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13878         else
13879                 printk(KERN_INFO
13880                        "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13881                        tp->dev->name, tg3_phy_string(tp),
13882                        ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13883                         ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13884                          "10/100/1000Base-T")),
13885                        (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13886
13887         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13888                dev->name,
13889                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13890                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13891                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13892                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13893                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13894         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13895                dev->name, tp->dma_rwctrl,
13896                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13897                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13898
13899         return 0;
13900
13901 err_out_apeunmap:
13902         if (tp->aperegs) {
13903                 iounmap(tp->aperegs);
13904                 tp->aperegs = NULL;
13905         }
13906
13907 err_out_iounmap:
13908         if (tp->regs) {
13909                 iounmap(tp->regs);
13910                 tp->regs = NULL;
13911         }
13912
13913 err_out_free_dev:
13914         free_netdev(dev);
13915
13916 err_out_free_res:
13917         pci_release_regions(pdev);
13918
13919 err_out_disable_pdev:
13920         pci_disable_device(pdev);
13921         pci_set_drvdata(pdev, NULL);
13922         return err;
13923 }
13924
13925 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13926 {
13927         struct net_device *dev = pci_get_drvdata(pdev);
13928
13929         if (dev) {
13930                 struct tg3 *tp = netdev_priv(dev);
13931
13932                 flush_scheduled_work();
13933
13934                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13935                         tg3_phy_fini(tp);
13936                         tg3_mdio_fini(tp);
13937                 }
13938
13939                 unregister_netdev(dev);
13940                 if (tp->aperegs) {
13941                         iounmap(tp->aperegs);
13942                         tp->aperegs = NULL;
13943                 }
13944                 if (tp->regs) {
13945                         iounmap(tp->regs);
13946                         tp->regs = NULL;
13947                 }
13948                 free_netdev(dev);
13949                 pci_release_regions(pdev);
13950                 pci_disable_device(pdev);
13951                 pci_set_drvdata(pdev, NULL);
13952         }
13953 }
13954
/* Legacy PCI PM suspend callback: quiesce the data path, halt the chip
 * and drop it into a low-power state.  If the power-state change fails,
 * the hardware is restarted so the interface stays usable, and the
 * original error is still returned to the PM core.
 *
 * Returns 0 on success or a negative errno from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        pci_power_t target_state;
        int err;

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);

        if (!netif_running(dev))
                return 0;

        /* Stop deferred work, PHY handling and the TX/RX path before
         * touching the hardware.
         */
        flush_scheduled_work();
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        /* Halt the chip; clearing INIT_COMPLETE forces a full
         * reinitialization on the next restart.
         */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        /* Without a PM capability, fall back to D3hot. */
        target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

        err = tg3_set_power_state(tp, target_state);
        if (err) {
                int err2;

                /* Power transition failed: bring the device back up so it
                 * remains operational, then propagate the original error.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                /* Restart the PHY outside the lock, but only if the
                 * hardware restart itself succeeded.
                 */
                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}
14016
14017 static int tg3_resume(struct pci_dev *pdev)
14018 {
14019         struct net_device *dev = pci_get_drvdata(pdev);
14020         struct tg3 *tp = netdev_priv(dev);
14021         int err;
14022
14023         pci_restore_state(tp->pdev);
14024
14025         if (!netif_running(dev))
14026                 return 0;
14027
14028         err = tg3_set_power_state(tp, PCI_D0);
14029         if (err)
14030                 return err;
14031
14032         netif_device_attach(dev);
14033
14034         tg3_full_lock(tp, 0);
14035
14036         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14037         err = tg3_restart_hw(tp, 1);
14038         if (err)
14039                 goto out;
14040
14041         tp->timer.expires = jiffies + tp->timer_offset;
14042         add_timer(&tp->timer);
14043
14044         tg3_netif_start(tp);
14045
14046 out:
14047         tg3_full_unlock(tp);
14048
14049         if (!err)
14050                 tg3_phy_start(tp);
14051
14052         return err;
14053 }
14054
/* Hook the driver into the PCI core: probe/remove plus the legacy
 * (pre-dev_pm_ops) suspend/resume callbacks.  tg3_pci_tbl lists the
 * device IDs this driver binds to.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
14063
14064 static int __init tg3_init(void)
14065 {
14066         return pci_register_driver(&tg3_driver);
14067 }
14068
/* Module exit point: unhook the tg3 PCI driver from the core, which
 * triggers tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}
14073
/* Register the module's init/exit handlers with the kernel. */
module_init(tg3_init);
module_exit(tg3_cleanup);