/* Source: linux-2.6 tree, drivers/net/tg3.c */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
/* PCI BAR indices: BAR 0 is the main MMIO register window, BAR 2 maps
 * the APE register space on devices that have one.
 */
#define BAR_0   0
#define BAR_2   2

/* Advertise VLAN tag handling only when 802.1Q support is built. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.95"
#define DRV_MODULE_RELDATE      "November 3, 2008"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap used when tg3_debug is left at -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts above. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Ring sizes are powers of two, so advance-with-wrap is a mask. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
/* PCI IDs this driver binds to: Broadcom Tigon3 variants plus SysKonnect,
 * Altima, and Apple boards built on the same silicon.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}      /* terminating entry */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
/* Names reported for ETHTOOL_GSTRINGS; the order must match the layout
 * of struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
306
/* Names of the TG3_NUM_TEST self-tests reported for ETHTOOL_TEST. */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
317
318 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319 {
320         writel(val, tp->regs + off);
321 }
322
323 static u32 tg3_read32(struct tg3 *tp, u32 off)
324 {
325         return (readl(tp->regs + off));
326 }
327
328 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329 {
330         writel(val, tp->aperegs + off);
331 }
332
333 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334 {
335         return (readl(tp->aperegs + off));
336 }
337
/* Write a chip register through the PCI config-space indirect window.
 * The BASE_ADDR/DATA register pair is shared state, so the write of the
 * address and the write of the data must happen atomically with respect
 * to other indirect accesses -- hence indirect_lock.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
347
/* MMIO write followed by a read-back of the same register, forcing the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
353
/* Read a chip register through the PCI config-space indirect window.
 * indirect_lock keeps the BASE_ADDR write and DATA read paired.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
365
/* Write a mailbox register when direct MMIO cannot be used.
 * Two mailboxes have dedicated config-space aliases and are handled
 * specially; everything else goes through the indirect register window
 * at an offset of 0x5600 (mailbox region base in the indirect map --
 * presumably matches the layout in tg3.h; verify there).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return ring consumer index has a direct config-space alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* Standard RX producer index likewise has a direct alias. */
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
395
/* Read a mailbox register through the indirect window; mailboxes live
 * at offset 0x5600 in the indirect register map (see the matching write
 * path in tg3_write_indirect_mbox()).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
407
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method: delay before the read-back so the value
                 * has settled, then read to flush the posted write.
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
432
/* Mailbox write with an optional flushing read-back.  The read is
 * skipped on chips where mailbox write reordering is already handled
 * (TG3_FLAG_MBOX_WRITE_REORDER) or where the ICH workaround forbids it.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
440
/* TX mailbox write with two hardware workarounds: write the value twice
 * on chips with the TXD mailbox bug, and read back on chips that can
 * reorder mailbox writes, to force the doorbell out.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
450
451 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452 {
453         return (readl(tp->regs + off + GRCMBOX_BASE));
454 }
455
456 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457 {
458         writel(val, tp->regs + off + GRCMBOX_BASE);
459 }
460
/* Register-access shorthands.  All of them dispatch through the per-device
 * method pointers in struct tg3, which are set up at probe time to the
 * direct or indirect implementations above.  The *_f variants flush the
 * write; tw32_wait_f additionally waits 'us' microseconds.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
471
/* Write a word of NIC SRAM through the memory window.  On the 5906 the
 * statistics block region is not writable, so those offsets are ignored.
 * The window base/data pair is serialized by indirect_lock, and the base
 * is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
496
/* Read a word of NIC SRAM through the memory window.  Mirrors
 * tg3_write_mem(): the 5906 statistics block region is not accessible
 * and reads back as zero; the window base is restored to zero after use.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
523
524 static void tg3_ape_lock_init(struct tg3 *tp)
525 {
526         int i;
527
528         /* Make sure the driver hasn't any stale locks. */
529         for (i = 0; i < 8; i++)
530                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531                                 APE_LOCK_GRANT_DRIVER);
532 }
533
534 static int tg3_ape_lock(struct tg3 *tp, int locknum)
535 {
536         int i, off;
537         int ret = 0;
538         u32 status;
539
540         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541                 return 0;
542
543         switch (locknum) {
544                 case TG3_APE_LOCK_GRC:
545                 case TG3_APE_LOCK_MEM:
546                         break;
547                 default:
548                         return -EINVAL;
549         }
550
551         off = 4 * locknum;
552
553         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555         /* Wait for up to 1 millisecond to acquire lock. */
556         for (i = 0; i < 100; i++) {
557                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558                 if (status == APE_LOCK_GRANT_DRIVER)
559                         break;
560                 udelay(10);
561         }
562
563         if (status != APE_LOCK_GRANT_DRIVER) {
564                 /* Revoke the lock request. */
565                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566                                 APE_LOCK_GRANT_DRIVER);
567
568                 ret = -EBUSY;
569         }
570
571         return ret;
572 }
573
574 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575 {
576         int off;
577
578         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579                 return;
580
581         switch (locknum) {
582                 case TG3_APE_LOCK_GRC:
583                 case TG3_APE_LOCK_MEM:
584                         break;
585                 default:
586                         return;
587         }
588
589         off = 4 * locknum;
590         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591 }
592
/* Mask interrupts at the PCI level, then write 1 to the interrupt
 * mailbox to disable them at the chip level as well.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
599
/* Kick the chip if status work may be pending: force an interrupt via
 * GRC local ctrl when the status block is flagged updated (non-tagged
 * mode only); otherwise nudge the coalescing engine to re-evaluate now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
609
/* Re-enable interrupts: clear irq_sync (with a write barrier so other
 * CPUs observe it before the unmask), unmask at the PCI level, and ack
 * the last processed tag in the interrupt mailbox.  1-shot MSI chips
 * need the mailbox write issued twice.  Finish by checking for any work
 * that arrived while interrupts were off.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
624
/* Return nonzero if the status block indicates pending work: a link
 * change event (only when link changes are interrupt-driven rather
 * than polled), or RX/TX ring indices that have moved past what the
 * driver has already processed.
 */
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}
644
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Ack up to last_tag; mmiowb() orders the mailbox write before
         * any subsequent MMIO from another CPU releasing a lock.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
665
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then disable NAPI polling
 * and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
672
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated so the first poll sees
 * work, and turn interrupts back on.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
684
/* Step the core clock down via TG3PCI_CLOCK_CTRL.  Chips with a CPMU
 * or in the 5780 class manage clocks themselves, so this is a no-op
 * there.  The intermediate writes sequence the clock change; each uses
 * a 40us flush-and-wait because reading CLOCK_CTRL back immediately
 * after a frequency change is unsafe (see _tw32_flush comment).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Keep only the CLKRUN bits and the low divider field. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Two-step transition through ALTCLK before dropping
                 * the 44MHz core bit.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
716
/* Maximum MI_COM busy-poll iterations (10us each) for a PHY access. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg over the MDIO interface into *@val.
 * Auto-polling is temporarily disabled (it owns the MI interface),
 * then a read frame is issued through MAC_MI_COM and the BUSY bit is
 * polled until the transaction completes.  Returns 0 on success,
 * -EBUSY on timeout; *val is zeroed on entry so a failure leaves it 0.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to pick up the data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if we turned it off above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
767
/* Write @val to PHY register @reg over the MDIO interface.  Mirrors
 * tg3_readphy(): auto-polling is paused around the MI_COM transaction
 * and the BUSY bit is polled for completion.  On the 5906, writes to
 * MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped (reported as
 * success).  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if we turned it off above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
816
817 static int tg3_bmcr_reset(struct tg3 *tp)
818 {
819         u32 phy_control;
820         int limit, err;
821
822         /* OK, reset it, and poll the BMCR_RESET bit until it
823          * clears or we time out.
824          */
825         phy_control = BMCR_RESET;
826         err = tg3_writephy(tp, MII_BMCR, phy_control);
827         if (err != 0)
828                 return -EBUSY;
829
830         limit = 5000;
831         while (limit--) {
832                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833                 if (err != 0)
834                         return -EBUSY;
835
836                 if ((phy_control & BMCR_RESET) == 0) {
837                         udelay(40);
838                         break;
839                 }
840                 udelay(10);
841         }
842         if (limit <= 0)
843                 return -EBUSY;
844
845         return 0;
846 }
847
848 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849 {
850         struct tg3 *tp = (struct tg3 *)bp->priv;
851         u32 val;
852
853         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854                 return -EAGAIN;
855
856         if (tg3_readphy(tp, reg, &val))
857                 return -EIO;
858
859         return val;
860 }
861
862 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863 {
864         struct tg3 *tp = (struct tg3 *)bp->priv;
865
866         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867                 return -EAGAIN;
868
869         if (tg3_writephy(tp, reg, val))
870                 return -EIO;
871
872         return 0;
873 }
874
/* phylib ->reset hook: the tg3 MDIO bus requires no reset sequence. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
879
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY model and the RGMII
 * in-band/out-of-band status signalling flags in tg3_flags3.
 * Silently does nothing for PHY models it does not recognize.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];
        /* Pick the LED-mode bits for the specific PHY model. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM50610:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case TG3_PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        /* Non-RGMII interface: just set the LED modes and clear the
         * RGMII interrupt bit, nothing else to configure.
         */
        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~MAC_PHYCFG1_RGMII_INT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII with standard in-band signalling enabled: turn on the
         * in-band status masks as well.
         */
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        /* Select external RX decode / send-status only when standard
         * in-band signalling is disabled and the respective external
         * in-band flag is set.
         */
        val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
                                    MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

        /* Mirror the in-band configuration into MAC_EXT_RGMII_MODE:
         * clear all RX/TX mode bits, then re-enable them only when
         * standard in-band signalling is active.
         */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
954
/* Resume MDIO bus accesses after tg3_mdio_stop(): clear the PAUSED
 * flag (under the bus lock), disable hardware autopolling, and
 * reprogram the 5785's PHY interface registers if applicable.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus->mdio_lock);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus->mdio_lock);
        }

        /* Manual MI access requires autopolling off. */
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
971
972 static void tg3_mdio_stop(struct tg3 *tp)
973 {
974         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
975                 mutex_lock(&tp->mdio_bus->mdio_lock);
976                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
977                 mutex_unlock(&tp->mdio_bus->mdio_lock);
978         }
979 }
980
/* Allocate and register the tg3 MDIO bus and bind the driver's MI
 * access routines to phylib.  Only runs when TG3_FLG3_USE_PHYLIB is
 * set and the bus has not already been initialized.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        tg3_mdio_start(tp);

        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        /* Bus id is derived from the PCI bus/devfn so it is unique
         * per NIC.
         */
        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        /* Only the single PHY at PHY_ADDR is probed. */
        tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
                        tp->dev->name, i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (!phydev || !phydev->drv) {
                printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Configure per-model PHY flags and interface type. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM50610:
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case TG3_PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case TG3_PHY_ID_RTL8201E:
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                break;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1061
1062 static void tg3_mdio_fini(struct tg3 *tp)
1063 {
1064         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1065                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1066                 mdiobus_unregister(tp->mdio_bus);
1067                 mdiobus_free(tp->mdio_bus);
1068                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1069         }
1070 }
1071
/* tp->lock is held. */
/* Signal the firmware that a driver event is pending by setting
 * GRC_RX_CPU_DRIVER_EVENT, and record the time so that
 * tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1083
/* Maximum time to wait for the firmware to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC, measured from the last
 * tg3_generate_fw_event()) until the firmware clears the
 * GRC_RX_CPU_DRIVER_EVENT bit, i.e. has consumed the previous event.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in ~8us steps; +1 guarantees at least one iteration. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1112
/* tp->lock is held. */
/* Report the current link parameters (BMCR/BMSR, advertisement,
 * partner ability, 1000BASE-T status, PHY address) to the management
 * firmware via the NIC SRAM mailbox.  Only applies to 5780-class
 * parts with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        /* Make sure the firmware has consumed the previous event. */
        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        /* Payload length in bytes: four 32-bit words minus two. */
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Word 0: BMCR in the high half, BMSR in the low half.
         * Failed reads leave the corresponding half zero.
         */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        /* Word 1: local advertisement / link partner ability. */
        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        /* Word 2: 1000BASE-T control/status (copper only). */
        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        /* Word 3: PHY address register. */
        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}
1160
1161 static void tg3_link_report(struct tg3 *tp)
1162 {
1163         if (!netif_carrier_ok(tp->dev)) {
1164                 if (netif_msg_link(tp))
1165                         printk(KERN_INFO PFX "%s: Link is down.\n",
1166                                tp->dev->name);
1167                 tg3_ump_link_report(tp);
1168         } else if (netif_msg_link(tp)) {
1169                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1170                        tp->dev->name,
1171                        (tp->link_config.active_speed == SPEED_1000 ?
1172                         1000 :
1173                         (tp->link_config.active_speed == SPEED_100 ?
1174                          100 : 10)),
1175                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1176                         "full" : "half"));
1177
1178                 printk(KERN_INFO PFX
1179                        "%s: Flow control is %s for TX and %s for RX.\n",
1180                        tp->dev->name,
1181                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1182                        "on" : "off",
1183                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1184                        "on" : "off");
1185                 tg3_ump_link_report(tp);
1186         }
1187 }
1188
1189 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1190 {
1191         u16 miireg;
1192
1193         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1194                 miireg = ADVERTISE_PAUSE_CAP;
1195         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1196                 miireg = ADVERTISE_PAUSE_ASYM;
1197         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1198                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1199         else
1200                 miireg = 0;
1201
1202         return miireg;
1203 }
1204
1205 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1206 {
1207         u16 miireg;
1208
1209         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1210                 miireg = ADVERTISE_1000XPAUSE;
1211         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1212                 miireg = ADVERTISE_1000XPSE_ASYM;
1213         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1214                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1215         else
1216                 miireg = 0;
1217
1218         return miireg;
1219 }
1220
1221 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1222 {
1223         u8 cap = 0;
1224
1225         if (lcladv & ADVERTISE_PAUSE_CAP) {
1226                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1227                         if (rmtadv & LPA_PAUSE_CAP)
1228                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1229                         else if (rmtadv & LPA_PAUSE_ASYM)
1230                                 cap = TG3_FLOW_CTRL_RX;
1231                 } else {
1232                         if (rmtadv & LPA_PAUSE_CAP)
1233                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1234                 }
1235         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1236                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1237                         cap = TG3_FLOW_CTRL_TX;
1238         }
1239
1240         return cap;
1241 }
1242
1243 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1244 {
1245         u8 cap = 0;
1246
1247         if (lcladv & ADVERTISE_1000XPAUSE) {
1248                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1249                         if (rmtadv & LPA_1000XPAUSE)
1250                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1251                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1252                                 cap = TG3_FLOW_CTRL_RX;
1253                 } else {
1254                         if (rmtadv & LPA_1000XPAUSE)
1255                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1256                 }
1257         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1258                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1259                         cap = TG3_FLOW_CTRL_TX;
1260         }
1261
1262         return cap;
1263 }
1264
/* Compute the active flow-control configuration from autoneg results
 * (or the forced setting) and apply it to the MAC RX/TX mode
 * registers, writing hardware only when a mode actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        /* With phylib, autoneg state lives in the phy_device. */
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & TG3_FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        /* Avoid redundant register writes. */
        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & TG3_FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1304
/* phylib link-change callback: synchronize the MAC configuration
 * (port mode, duplex, flow control, MI status polling, TX timings)
 * with the PHY's current state, then log the link change if any of
 * the tracked parameters moved.  Runs with tp->lock taken here.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        spin_lock(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* Select MII vs GMII port mode from the link speed. */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: resolve pause from our config
                         * and the partner's reported pause bits.
                         */
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        /* Only touch the MAC mode register when it changed. */
        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        /* 1000/half needs a larger slot time (0xff vs 32). */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Report only when something user-visible changed. */
        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
            linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock(&tp->lock);

        /* tg3_link_report() may sleep/log; call outside the lock. */
        if (linkmesg)
                tg3_link_report(tp);
}
1385
/* Connect the MAC to its PHY through phylib and restrict the
 * advertised feature set to what the MAC supports for the PHY's
 * interface mode.  Idempotent: returns 0 if already connected.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                phydev->supported &= (PHY_GBIT_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                /* Unsupported interface mode: undo the connect. */
                phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
                return -EINVAL;
        }

        tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}
1430
/* Resume the PHY: if the device was in low-power mode, restore the
 * link parameters saved at power-down, then (re)start the phylib
 * state machine and kick off autonegotiation.
 */
static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (tp->link_config.phy_is_low_power) {
                tp->link_config.phy_is_low_power = 0;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}
1452
/* Stop the phylib state machine for our PHY; no-op if never connected. */
static void tg3_phy_stop(struct tg3 *tp)
{
        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}
1460
1461 static void tg3_phy_fini(struct tg3 *tp)
1462 {
1463         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1464                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1465                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1466         }
1467 }
1468
/* Write @val to DSP register @reg: the address is latched through
 * MII_TG3_DSP_ADDRESS, then the data goes through the RW port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1474
/* Enable or disable automatic MDI crossover on copper 5705+ devices.
 * The 5906 EPHY uses a shadow-register scheme; other PHYs go through
 * the AUX_CTRL misc shadow.  SerDes devices are untouched.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* Open the EPHY shadow register window, flip the MDIX
                 * bit, then restore the original test register value.
                 */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Read-modify-write the AUX_CTRL misc shadow; WREN
                 * must be set for the write-back to take effect.
                 */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
1512
/* Enable the PHY's ethernet wirespeed feature (skipped when the
 * NO_ETH_WIRE_SPEED quirk flag is set) by setting bits 15 and 4 in
 * the AUX_CTRL shadow selected by 0x7007.
 * NOTE(review): 0x7007 presumably selects the misc-control shadow
 * page — confirm against the Broadcom PHY register documentation.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
1525
/* Unpack the one-time-programmable (OTP) calibration word stored in
 * tp->phy_otp into the individual PHY DSP tuning registers.  No-op
 * when no OTP value was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        /* AGC target. */
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        /* High-pass filter settings. */
        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        /* Low-pass filter disable + ADC clock adjust. */
        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        /* VDAC trim. */
        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        /* 10BASE-T amplitude. */
        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        /* Resistor offsets. */
        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1568
1569 static int tg3_wait_macro_done(struct tg3 *tp)
1570 {
1571         int limit = 100;
1572
1573         while (limit--) {
1574                 u32 tmp32;
1575
1576                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1577                         if ((tmp32 & 0x1000) == 0)
1578                                 break;
1579                 }
1580         }
1581         if (limit <= 0)
1582                 return -EBUSY;
1583
1584         return 0;
1585 }
1586
/* Write a known DSP test pattern into each of the four channels and
 * read it back to verify the PHY's DSP is functioning.  On mismatch,
 * a recovery sequence is written (registers 0x000b / 0x4001 / 0x4005).
 * Sets *@resetp when the caller should reset the PHY and retry.
 * Returns 0 on success, -EBUSY on timeout or verification failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* Per-channel test words: pairs of (low 15 bits, high 4 bits). */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select the channel's block and start a write macro. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel and start the read-back macro. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back word pairs and compare against the pattern,
                 * masking to the significant bits of each half.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: issue the recovery writes
                                 * before reporting failure.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1652
1653 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1654 {
1655         int chan;
1656
1657         for (chan = 0; chan < 4; chan++) {
1658                 int i;
1659
1660                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1661                              (chan * 0x2000) | 0x0200);
1662                 tg3_writephy(tp, 0x16, 0x0002);
1663                 for (i = 0; i < 6; i++)
1664                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1665                 tg3_writephy(tp, 0x16, 0x0202);
1666                 if (tg3_wait_macro_done(tp))
1667                         return -EBUSY;
1668         }
1669
1670         return 0;
1671 }
1672
1673 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1674 {
1675         u32 reg32, phy9_orig;
1676         int retries, do_phy_reset, err;
1677
1678         retries = 10;
1679         do_phy_reset = 1;
1680         do {
1681                 if (do_phy_reset) {
1682                         err = tg3_bmcr_reset(tp);
1683                         if (err)
1684                                 return err;
1685                         do_phy_reset = 0;
1686                 }
1687
1688                 /* Disable transmitter and interrupt.  */
1689                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1690                         continue;
1691
1692                 reg32 |= 0x3000;
1693                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1694
1695                 /* Set full-duplex, 1000 mbps.  */
1696                 tg3_writephy(tp, MII_BMCR,
1697                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1698
1699                 /* Set to master mode.  */
1700                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1701                         continue;
1702
1703                 tg3_writephy(tp, MII_TG3_CTRL,
1704                              (MII_TG3_CTRL_AS_MASTER |
1705                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1706
1707                 /* Enable SM_DSP_CLOCK and 6dB.  */
1708                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1709
1710                 /* Block the PHY control access.  */
1711                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1712                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1713
1714                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1715                 if (!err)
1716                         break;
1717         } while (--retries);
1718
1719         err = tg3_phy_reset_chanpat(tp);
1720         if (err)
1721                 return err;
1722
1723         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1724         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1725
1726         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1727         tg3_writephy(tp, 0x16, 0x0000);
1728
1729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1730             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1731                 /* Set Extended packet length bit for jumbo frames */
1732                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1733         }
1734         else {
1735                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1736         }
1737
1738         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1739
1740         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1741                 reg32 &= ~0x3000;
1742                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1743         } else if (!err)
1744                 err = -EBUSY;
1745
1746         return err;
1747 }
1748
/* Reset the tigon3 PHY and re-apply all chip-specific workarounds.
 * If the device is up with a live link, the link is reported down
 * first, since the reset will drop it.  After the reset, fixups are
 * applied according to the PHY bug bits in tp->tg3_flags2.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the 5906 ephy out of IDDQ (low-power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR latches link-down events; read twice so the second
	 * read reflects the current status.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops any existing link; report it now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset
		 * sequence instead of a plain BMCR reset.
		 */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX revs), temporarily clear the CPMU's
	 * GPHY 10MB-RX-only mode around the reset.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		/* Undo the 12.5 MHz MAC clock selection made at
		 * power-down time, if it is still in effect.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Per-bug DSP fixups below; the magic register/value pairs
	 * come from vendor-provided workarounds.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* The double write is intentional. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1895
/* Configure the GRC local-control GPIOs that gate auxiliary power.
 * If either this port or its peer (on dual-port 5704/5714 devices)
 * needs WOL or ASF, the GPIOs are driven to keep aux power up;
 * otherwise they are toggled to release it.  No-op on LOM (non-NIC)
 * configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only NIC boards have these GPIOs wired for aux power. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up if either port needs WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* The peer port has already set up the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Raise GPIO0 last, in a separate write. */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* The peer port has already set up the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 low then back high (no WOL/ASF
			 * needed, so aux power can be released).
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2006
2007 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2008 {
2009         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2010                 return 1;
2011         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2012                 if (speed != SPEED_10)
2013                         return 1;
2014         } else if (speed == SPEED_10)
2015                 return 1;
2016
2017         return 0;
2018 }
2019
2020 static int tg3_setup_phy(struct tg3 *, int);
2021
2022 #define RESET_KIND_SHUTDOWN     0
2023 #define RESET_KIND_INIT         1
2024 #define RESET_KIND_SUSPEND      2
2025
2026 static void tg3_write_sig_post_reset(struct tg3 *, int);
2027 static int tg3_halt_cpu(struct tg3 *, u32);
2028 static int tg3_nvram_lock(struct tg3 *);
2029 static void tg3_nvram_unlock(struct tg3 *);
2030
/* Power down the PHY in preparation for a low-power device state.
 * Serdes devices and the 5906 ephy get special handling, and a few
 * chips with known power-down bugs skip the BMCR_PDOWN write
 * entirely.  @do_low_power selects the deeper AUX_CTRL power-save
 * programming for copper PHYs.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		/* No MII PHY to power down on serdes devices. */
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the ephy and put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Switch the 1000MB MAC clock to 12.5 MHz before
		 * powering down (undone again in tg3_phy_reset()).
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2084
2085 /* tp->lock is held. */
2086 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2087 {
2088         u32 addr_high, addr_low;
2089         int i;
2090
2091         addr_high = ((tp->dev->dev_addr[0] << 8) |
2092                      tp->dev->dev_addr[1]);
2093         addr_low = ((tp->dev->dev_addr[2] << 24) |
2094                     (tp->dev->dev_addr[3] << 16) |
2095                     (tp->dev->dev_addr[4] <<  8) |
2096                     (tp->dev->dev_addr[5] <<  0));
2097         for (i = 0; i < 4; i++) {
2098                 if (i == 1 && skip_mac_1)
2099                         continue;
2100                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2101                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2102         }
2103
2104         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2106                 for (i = 0; i < 12; i++) {
2107                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2108                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2109                 }
2110         }
2111
2112         addr_high = (tp->dev->dev_addr[0] +
2113                      tp->dev->dev_addr[1] +
2114                      tp->dev->dev_addr[2] +
2115                      tp->dev->dev_addr[3] +
2116                      tp->dev->dev_addr[4] +
2117                      tp->dev->dev_addr[5]) &
2118                 TX_BACKOFF_SEED_MASK;
2119         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2120 }
2121
2122 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2123 {
2124         u32 misc_host_ctrl;
2125         bool device_should_wake, do_low_power;
2126
2127         /* Make sure register accesses (indirect or otherwise)
2128          * will function correctly.
2129          */
2130         pci_write_config_dword(tp->pdev,
2131                                TG3PCI_MISC_HOST_CTRL,
2132                                tp->misc_host_ctrl);
2133
2134         switch (state) {
2135         case PCI_D0:
2136                 pci_enable_wake(tp->pdev, state, false);
2137                 pci_set_power_state(tp->pdev, PCI_D0);
2138
2139                 /* Switch out of Vaux if it is a NIC */
2140                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2141                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2142
2143                 return 0;
2144
2145         case PCI_D1:
2146         case PCI_D2:
2147         case PCI_D3hot:
2148                 break;
2149
2150         default:
2151                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2152                         tp->dev->name, state);
2153                 return -EINVAL;
2154         }
2155         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2156         tw32(TG3PCI_MISC_HOST_CTRL,
2157              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2158
2159         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2160                              device_may_wakeup(&tp->pdev->dev) &&
2161                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2162
2163         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2164                 do_low_power = false;
2165                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2166                     !tp->link_config.phy_is_low_power) {
2167                         struct phy_device *phydev;
2168                         u32 phyid, advertising;
2169
2170                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2171
2172                         tp->link_config.phy_is_low_power = 1;
2173
2174                         tp->link_config.orig_speed = phydev->speed;
2175                         tp->link_config.orig_duplex = phydev->duplex;
2176                         tp->link_config.orig_autoneg = phydev->autoneg;
2177                         tp->link_config.orig_advertising = phydev->advertising;
2178
2179                         advertising = ADVERTISED_TP |
2180                                       ADVERTISED_Pause |
2181                                       ADVERTISED_Autoneg |
2182                                       ADVERTISED_10baseT_Half;
2183
2184                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2185                             device_should_wake) {
2186                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2187                                         advertising |=
2188                                                 ADVERTISED_100baseT_Half |
2189                                                 ADVERTISED_100baseT_Full |
2190                                                 ADVERTISED_10baseT_Full;
2191                                 else
2192                                         advertising |= ADVERTISED_10baseT_Full;
2193                         }
2194
2195                         phydev->advertising = advertising;
2196
2197                         phy_start_aneg(phydev);
2198
2199                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2200                         if (phyid != TG3_PHY_ID_BCMAC131) {
2201                                 phyid &= TG3_PHY_OUI_MASK;
2202                                 if (phyid == TG3_PHY_OUI_1 &&
2203                                     phyid == TG3_PHY_OUI_2 &&
2204                                     phyid == TG3_PHY_OUI_3)
2205                                         do_low_power = true;
2206                         }
2207                 }
2208         } else {
2209                 do_low_power = false;
2210
2211                 if (tp->link_config.phy_is_low_power == 0) {
2212                         tp->link_config.phy_is_low_power = 1;
2213                         tp->link_config.orig_speed = tp->link_config.speed;
2214                         tp->link_config.orig_duplex = tp->link_config.duplex;
2215                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2216                 }
2217
2218                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2219                         tp->link_config.speed = SPEED_10;
2220                         tp->link_config.duplex = DUPLEX_HALF;
2221                         tp->link_config.autoneg = AUTONEG_ENABLE;
2222                         tg3_setup_phy(tp, 0);
2223                 }
2224         }
2225
2226         __tg3_set_mac_addr(tp, 0);
2227
2228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2229                 u32 val;
2230
2231                 val = tr32(GRC_VCPU_EXT_CTRL);
2232                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2233         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2234                 int i;
2235                 u32 val;
2236
2237                 for (i = 0; i < 200; i++) {
2238                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2239                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2240                                 break;
2241                         msleep(1);
2242                 }
2243         }
2244         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2245                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2246                                                      WOL_DRV_STATE_SHUTDOWN |
2247                                                      WOL_DRV_WOL |
2248                                                      WOL_SET_MAGIC_PKT);
2249
2250         if (device_should_wake) {
2251                 u32 mac_mode;
2252
2253                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2254                         if (do_low_power) {
2255                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2256                                 udelay(40);
2257                         }
2258
2259                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2260                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2261                         else
2262                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2263
2264                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2266                             ASIC_REV_5700) {
2267                                 u32 speed = (tp->tg3_flags &
2268                                              TG3_FLAG_WOL_SPEED_100MB) ?
2269                                              SPEED_100 : SPEED_10;
2270                                 if (tg3_5700_link_polarity(tp, speed))
2271                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2272                                 else
2273                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2274                         }
2275                 } else {
2276                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2277                 }
2278
2279                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2280                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2281
2282                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2283                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2284                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2285                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2286                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2287                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2288
2289                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2290                         mac_mode |= tp->mac_mode &
2291                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2292                         if (mac_mode & MAC_MODE_APE_TX_EN)
2293                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2294                 }
2295
2296                 tw32_f(MAC_MODE, mac_mode);
2297                 udelay(100);
2298
2299                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2300                 udelay(10);
2301         }
2302
2303         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2304             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2305              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2306                 u32 base_val;
2307
2308                 base_val = tp->pci_clock_ctrl;
2309                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2310                              CLOCK_CTRL_TXCLK_DISABLE);
2311
2312                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2313                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2314         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2315                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2316                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2317                 /* do nothing */
2318         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2319                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2320                 u32 newbits1, newbits2;
2321
2322                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2323                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2324                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2325                                     CLOCK_CTRL_TXCLK_DISABLE |
2326                                     CLOCK_CTRL_ALTCLK);
2327                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2328                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2329                         newbits1 = CLOCK_CTRL_625_CORE;
2330                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2331                 } else {
2332                         newbits1 = CLOCK_CTRL_ALTCLK;
2333                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2334                 }
2335
2336                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2337                             40);
2338
2339                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2340                             40);
2341
2342                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2343                         u32 newbits3;
2344
2345                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2346                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2347                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2348                                             CLOCK_CTRL_TXCLK_DISABLE |
2349                                             CLOCK_CTRL_44MHZ_CORE);
2350                         } else {
2351                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2352                         }
2353
2354                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2355                                     tp->pci_clock_ctrl | newbits3, 40);
2356                 }
2357         }
2358
2359         if (!(device_should_wake) &&
2360             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2361             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2362                 tg3_power_down_phy(tp, do_low_power);
2363
2364         tg3_frob_aux_power(tp);
2365
2366         /* Workaround for unstable PLL clock */
2367         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2368             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2369                 u32 val = tr32(0x7d00);
2370
2371                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2372                 tw32(0x7d00, val);
2373                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2374                         int err;
2375
2376                         err = tg3_nvram_lock(tp);
2377                         tg3_halt_cpu(tp, RX_CPU_BASE);
2378                         if (!err)
2379                                 tg3_nvram_unlock(tp);
2380                 }
2381         }
2382
2383         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2384
2385         if (device_should_wake)
2386                 pci_enable_wake(tp->pdev, state, true);
2387
2388         /* Finally, set the new power state. */
2389         pci_set_power_state(tp->pdev, state);
2390
2391         return 0;
2392 }
2393
2394 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2395 {
2396         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2397         case MII_TG3_AUX_STAT_10HALF:
2398                 *speed = SPEED_10;
2399                 *duplex = DUPLEX_HALF;
2400                 break;
2401
2402         case MII_TG3_AUX_STAT_10FULL:
2403                 *speed = SPEED_10;
2404                 *duplex = DUPLEX_FULL;
2405                 break;
2406
2407         case MII_TG3_AUX_STAT_100HALF:
2408                 *speed = SPEED_100;
2409                 *duplex = DUPLEX_HALF;
2410                 break;
2411
2412         case MII_TG3_AUX_STAT_100FULL:
2413                 *speed = SPEED_100;
2414                 *duplex = DUPLEX_FULL;
2415                 break;
2416
2417         case MII_TG3_AUX_STAT_1000HALF:
2418                 *speed = SPEED_1000;
2419                 *duplex = DUPLEX_HALF;
2420                 break;
2421
2422         case MII_TG3_AUX_STAT_1000FULL:
2423                 *speed = SPEED_1000;
2424                 *duplex = DUPLEX_FULL;
2425                 break;
2426
2427         default:
2428                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2429                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2430                                  SPEED_10;
2431                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2432                                   DUPLEX_HALF;
2433                         break;
2434                 }
2435                 *speed = SPEED_INVALID;
2436                 *duplex = DUPLEX_INVALID;
2437                 break;
2438         }
2439 }
2440
/* Program the copper PHY advertisement and control registers from
 * tp->link_config, then either force the configured speed/duplex
 * (autoneg disabled) or restart autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when WOL needs a 100Mb link. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise everything link_config asks
		 * for (minus gigabit on 10/100-only hardware).
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts request link-master role. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* Clear gigabit advertisement in forced 10/100 mode. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to
			 * ~15ms) for link-down before writing the new
			 * BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* Read BMSR twice: it is latched. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autonegotiation: (re)start it. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2578
2579 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2580 {
2581         int err;
2582
2583         /* Turn off tap power management. */
2584         /* Set Extended packet length bit */
2585         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2586
2587         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2588         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2589
2590         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2591         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2592
2593         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2594         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2595
2596         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2597         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2598
2599         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2600         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2601
2602         udelay(40);
2603
2604         return err;
2605 }
2606
2607 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2608 {
2609         u32 adv_reg, all_mask = 0;
2610
2611         if (mask & ADVERTISED_10baseT_Half)
2612                 all_mask |= ADVERTISE_10HALF;
2613         if (mask & ADVERTISED_10baseT_Full)
2614                 all_mask |= ADVERTISE_10FULL;
2615         if (mask & ADVERTISED_100baseT_Half)
2616                 all_mask |= ADVERTISE_100HALF;
2617         if (mask & ADVERTISED_100baseT_Full)
2618                 all_mask |= ADVERTISE_100FULL;
2619
2620         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2621                 return 0;
2622
2623         if ((adv_reg & all_mask) != all_mask)
2624                 return 0;
2625         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2626                 u32 tg3_ctrl;
2627
2628                 all_mask = 0;
2629                 if (mask & ADVERTISED_1000baseT_Half)
2630                         all_mask |= ADVERTISE_1000HALF;
2631                 if (mask & ADVERTISED_1000baseT_Full)
2632                         all_mask |= ADVERTISE_1000FULL;
2633
2634                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2635                         return 0;
2636
2637                 if ((tg3_ctrl & all_mask) != all_mask)
2638                         return 0;
2639         }
2640         return 1;
2641 }
2642
2643 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2644 {
2645         u32 curadv, reqadv;
2646
2647         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2648                 return 1;
2649
2650         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2651         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2652
2653         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2654                 if (curadv != reqadv)
2655                         return 0;
2656
2657                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2658                         tg3_readphy(tp, MII_LPA, rmtadv);
2659         } else {
2660                 /* Reprogram the advertisement register, even if it
2661                  * does not affect the current link.  If the link
2662                  * gets renegotiated in the future, we can save an
2663                  * additional renegotiation cycle by advertising
2664                  * it correctly in the first place.
2665                  */
2666                 if (curadv != reqadv) {
2667                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2668                                      ADVERTISE_PAUSE_ASYM);
2669                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2670                 }
2671         }
2672
2673         return 1;
2674 }
2675
/* Bring up / re-evaluate the link on a copper PHY: apply chip-specific
 * PHY workarounds, poll for link, derive speed/duplex and flow control,
 * program MAC_MODE accordingly, and report carrier changes.  Always
 * returns 0 except when the 5401 DSP init or PHY reset fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched MAC status change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we do direct MDIO accesses. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link bit is latched-low: read twice. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: reload the DSP and wait
			 * up to 10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0: if still no link at gigabit speed,
			 * reset the PHY and redo the DSP init.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt, or all of them,
	 * depending on how link changes are detected.
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL shadow 0x4007 is set; if it
		 * was not, set it and go straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link; BMSR is latched so read twice. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero AUX_STAT value (up to 20ms). */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable, sane BMCR value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link counts as up only if autoneg is on, all
			 * requested modes are advertised, and the pause
			 * advertisement is consistent.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: the PHY must match exactly. */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY and re-check link state. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select MII vs GMII MAC port mode from the resolved speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: signal the firmware
	 * via its mailbox after the status bits settle.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any carrier change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2934
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine(); modeled on the IEEE
 * 802.3 Clause 37 arbitration process — TODO confirm exact mapping.
 */
struct tg3_fiber_aneginfo {
	/* Current ANEG_STATE_* value of the state machine. */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* MR_* control/result flag bits (inputs and outputs of the
	 * state machine).
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: when the link last changed, and "now"
	 * (cur_time is incremented once per state-machine call).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * repeated; used to latch ability_match.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean match flags updated from the RX config register. */
	char ability_match, idle_match, ack_match;

	/* Raw TX/RX autoneg config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2992 #define ANEG_OK         0
2993 #define ANEG_DONE       1
2994 #define ANEG_TIMER_ENAB 2
2995 #define ANEG_FAILED     -1
2996
2997 #define ANEG_STATE_SETTLE_TIME  10000
2998
2999 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3000                                    struct tg3_fiber_aneginfo *ap)
3001 {
3002         u16 flowctrl;
3003         unsigned long delta;
3004         u32 rx_cfg_reg;
3005         int ret;
3006
3007         if (ap->state == ANEG_STATE_UNKNOWN) {
3008                 ap->rxconfig = 0;
3009                 ap->link_time = 0;
3010                 ap->cur_time = 0;
3011                 ap->ability_match_cfg = 0;
3012                 ap->ability_match_count = 0;
3013                 ap->ability_match = 0;
3014                 ap->idle_match = 0;
3015                 ap->ack_match = 0;
3016         }
3017         ap->cur_time++;
3018
3019         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3020                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3021
3022                 if (rx_cfg_reg != ap->ability_match_cfg) {
3023                         ap->ability_match_cfg = rx_cfg_reg;
3024                         ap->ability_match = 0;
3025                         ap->ability_match_count = 0;
3026                 } else {
3027                         if (++ap->ability_match_count > 1) {
3028                                 ap->ability_match = 1;
3029                                 ap->ability_match_cfg = rx_cfg_reg;
3030                         }
3031                 }
3032                 if (rx_cfg_reg & ANEG_CFG_ACK)
3033                         ap->ack_match = 1;
3034                 else
3035                         ap->ack_match = 0;
3036
3037                 ap->idle_match = 0;
3038         } else {
3039                 ap->idle_match = 1;
3040                 ap->ability_match_cfg = 0;
3041                 ap->ability_match_count = 0;
3042                 ap->ability_match = 0;
3043                 ap->ack_match = 0;
3044
3045                 rx_cfg_reg = 0;
3046         }
3047
3048         ap->rxconfig = rx_cfg_reg;
3049         ret = ANEG_OK;
3050
3051         switch(ap->state) {
3052         case ANEG_STATE_UNKNOWN:
3053                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3054                         ap->state = ANEG_STATE_AN_ENABLE;
3055
3056                 /* fallthru */
3057         case ANEG_STATE_AN_ENABLE:
3058                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3059                 if (ap->flags & MR_AN_ENABLE) {
3060                         ap->link_time = 0;
3061                         ap->cur_time = 0;
3062                         ap->ability_match_cfg = 0;
3063                         ap->ability_match_count = 0;
3064                         ap->ability_match = 0;
3065                         ap->idle_match = 0;
3066                         ap->ack_match = 0;
3067
3068                         ap->state = ANEG_STATE_RESTART_INIT;
3069                 } else {
3070                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3071                 }
3072                 break;
3073
3074         case ANEG_STATE_RESTART_INIT:
3075                 ap->link_time = ap->cur_time;
3076                 ap->flags &= ~(MR_NP_LOADED);
3077                 ap->txconfig = 0;
3078                 tw32(MAC_TX_AUTO_NEG, 0);
3079                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3080                 tw32_f(MAC_MODE, tp->mac_mode);
3081                 udelay(40);
3082
3083                 ret = ANEG_TIMER_ENAB;
3084                 ap->state = ANEG_STATE_RESTART;
3085
3086                 /* fallthru */
3087         case ANEG_STATE_RESTART:
3088                 delta = ap->cur_time - ap->link_time;
3089                 if (delta > ANEG_STATE_SETTLE_TIME) {
3090                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3091                 } else {
3092                         ret = ANEG_TIMER_ENAB;
3093                 }
3094                 break;
3095
3096         case ANEG_STATE_DISABLE_LINK_OK:
3097                 ret = ANEG_DONE;
3098                 break;
3099
3100         case ANEG_STATE_ABILITY_DETECT_INIT:
3101                 ap->flags &= ~(MR_TOGGLE_TX);
3102                 ap->txconfig = ANEG_CFG_FD;
3103                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3104                 if (flowctrl & ADVERTISE_1000XPAUSE)
3105                         ap->txconfig |= ANEG_CFG_PS1;
3106                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3107                         ap->txconfig |= ANEG_CFG_PS2;
3108                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3109                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3110                 tw32_f(MAC_MODE, tp->mac_mode);
3111                 udelay(40);
3112
3113                 ap->state = ANEG_STATE_ABILITY_DETECT;
3114                 break;
3115
3116         case ANEG_STATE_ABILITY_DETECT:
3117                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3118                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3119                 }
3120                 break;
3121
3122         case ANEG_STATE_ACK_DETECT_INIT:
3123                 ap->txconfig |= ANEG_CFG_ACK;
3124                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3125                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3126                 tw32_f(MAC_MODE, tp->mac_mode);
3127                 udelay(40);
3128
3129                 ap->state = ANEG_STATE_ACK_DETECT;
3130
3131                 /* fallthru */
3132         case ANEG_STATE_ACK_DETECT:
3133                 if (ap->ack_match != 0) {
3134                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3135                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3136                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3137                         } else {
3138                                 ap->state = ANEG_STATE_AN_ENABLE;
3139                         }
3140                 } else if (ap->ability_match != 0 &&
3141                            ap->rxconfig == 0) {
3142                         ap->state = ANEG_STATE_AN_ENABLE;
3143                 }
3144                 break;
3145
3146         case ANEG_STATE_COMPLETE_ACK_INIT:
3147                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3148                         ret = ANEG_FAILED;
3149                         break;
3150                 }
3151                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3152                                MR_LP_ADV_HALF_DUPLEX |
3153                                MR_LP_ADV_SYM_PAUSE |
3154                                MR_LP_ADV_ASYM_PAUSE |
3155                                MR_LP_ADV_REMOTE_FAULT1 |
3156                                MR_LP_ADV_REMOTE_FAULT2 |
3157                                MR_LP_ADV_NEXT_PAGE |
3158                                MR_TOGGLE_RX |
3159                                MR_NP_RX);
3160                 if (ap->rxconfig & ANEG_CFG_FD)
3161                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3162                 if (ap->rxconfig & ANEG_CFG_HD)
3163                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3164                 if (ap->rxconfig & ANEG_CFG_PS1)
3165                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3166                 if (ap->rxconfig & ANEG_CFG_PS2)
3167                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3168                 if (ap->rxconfig & ANEG_CFG_RF1)
3169                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3170                 if (ap->rxconfig & ANEG_CFG_RF2)
3171                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3172                 if (ap->rxconfig & ANEG_CFG_NP)
3173                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3174
3175                 ap->link_time = ap->cur_time;
3176
3177                 ap->flags ^= (MR_TOGGLE_TX);
3178                 if (ap->rxconfig & 0x0008)
3179                         ap->flags |= MR_TOGGLE_RX;
3180                 if (ap->rxconfig & ANEG_CFG_NP)
3181                         ap->flags |= MR_NP_RX;
3182                 ap->flags |= MR_PAGE_RX;
3183
3184                 ap->state = ANEG_STATE_COMPLETE_ACK;
3185                 ret = ANEG_TIMER_ENAB;
3186                 break;
3187
3188         case ANEG_STATE_COMPLETE_ACK:
3189                 if (ap->ability_match != 0 &&
3190                     ap->rxconfig == 0) {
3191                         ap->state = ANEG_STATE_AN_ENABLE;
3192                         break;
3193                 }
3194                 delta = ap->cur_time - ap->link_time;
3195                 if (delta > ANEG_STATE_SETTLE_TIME) {
3196                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3197                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3198                         } else {
3199                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3200                                     !(ap->flags & MR_NP_RX)) {
3201                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3202                                 } else {
3203                                         ret = ANEG_FAILED;
3204                                 }
3205                         }
3206                 }
3207                 break;
3208
3209         case ANEG_STATE_IDLE_DETECT_INIT:
3210                 ap->link_time = ap->cur_time;
3211                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3212                 tw32_f(MAC_MODE, tp->mac_mode);
3213                 udelay(40);
3214
3215                 ap->state = ANEG_STATE_IDLE_DETECT;
3216                 ret = ANEG_TIMER_ENAB;
3217                 break;
3218
3219         case ANEG_STATE_IDLE_DETECT:
3220                 if (ap->ability_match != 0 &&
3221                     ap->rxconfig == 0) {
3222                         ap->state = ANEG_STATE_AN_ENABLE;
3223                         break;
3224                 }
3225                 delta = ap->cur_time - ap->link_time;
3226                 if (delta > ANEG_STATE_SETTLE_TIME) {
3227                         /* XXX another gem from the Broadcom driver :( */
3228                         ap->state = ANEG_STATE_LINK_OK;
3229                 }
3230                 break;
3231
3232         case ANEG_STATE_LINK_OK:
3233                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3234                 ret = ANEG_DONE;
3235                 break;
3236
3237         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3238                 /* ??? unimplemented */
3239                 break;
3240
3241         case ANEG_STATE_NEXT_PAGE_WAIT:
3242                 /* ??? unimplemented */
3243                 break;
3244
3245         default:
3246                 ret = ANEG_FAILED;
3247                 break;
3248         }
3249
3250         return ret;
3251 }
3252
3253 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3254 {
3255         int res = 0;
3256         struct tg3_fiber_aneginfo aninfo;
3257         int status = ANEG_FAILED;
3258         unsigned int tick;
3259         u32 tmp;
3260
3261         tw32_f(MAC_TX_AUTO_NEG, 0);
3262
3263         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3264         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3265         udelay(40);
3266
3267         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3268         udelay(40);
3269
3270         memset(&aninfo, 0, sizeof(aninfo));
3271         aninfo.flags |= MR_AN_ENABLE;
3272         aninfo.state = ANEG_STATE_UNKNOWN;
3273         aninfo.cur_time = 0;
3274         tick = 0;
3275         while (++tick < 195000) {
3276                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3277                 if (status == ANEG_DONE || status == ANEG_FAILED)
3278                         break;
3279
3280                 udelay(1);
3281         }
3282
3283         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3284         tw32_f(MAC_MODE, tp->mac_mode);
3285         udelay(40);
3286
3287         *txflags = aninfo.txconfig;
3288         *rxflags = aninfo.flags;
3289
3290         if (status == ANEG_DONE &&
3291             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3292                              MR_LP_ADV_FULL_DUPLEX)))
3293                 res = 1;
3294
3295         return res;
3296 }
3297
/* One-time initialization sequence for the BCM8002 fiber PHY.
 *
 * The register numbers and values below are an opaque vendor-specified
 * bring-up sequence; the inline comments reflect the original author's
 * annotations.  Do not reorder the writes or delays.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        /* Busy-wait ~5ms; called from a context where sleeping is
         * apparently not an option (see the XXX above).
         */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        /* Busy-wait ~150ms. */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
3347
/* Bring up a fiber link using the on-chip SG_DIG hardware
 * autonegotiation engine (5704S-style serdes).
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * Returns 1 if the link is up on exit, 0 otherwise.  Also maintains
 * tp->serdes_counter and the TG3_FLG2_PARALLEL_DETECT flag used by the
 * periodic poll path.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* Every revision except 5704 A0/A1 needs the MAC_SERDES_CFG
         * workaround writes below.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: disable hardware autoneg if it was on,
                 * and report link up on PCS sync alone.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Magic per-port serdes config values;
                                 * origin undocumented — presumably from
                                 * Broadcom reference code.
                                 */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* If we came up by parallel detection and still see PCS
                 * sync without config code words, keep the link up while
                 * the countdown runs instead of restarting autoneg.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse soft reset, then kick off hardware autoneg. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Translate the negotiated pause bits into 1000X
                         * advertisement words for flow control resolution.
                         */
                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: turn the hardware
                                 * engine off and try parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* Neither PCS sync nor signal detect: rearm the autoneg
                 * timeout and clear any stale parallel-detect state.
                 */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
3489
3490 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3491 {
3492         int current_link_up = 0;
3493
3494         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3495                 goto out;
3496
3497         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3498                 u32 txflags, rxflags;
3499                 int i;
3500
3501                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3502                         u32 local_adv = 0, remote_adv = 0;
3503
3504                         if (txflags & ANEG_CFG_PS1)
3505                                 local_adv |= ADVERTISE_1000XPAUSE;
3506                         if (txflags & ANEG_CFG_PS2)
3507                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3508
3509                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3510                                 remote_adv |= LPA_1000XPAUSE;
3511                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3512                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3513
3514                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3515
3516                         current_link_up = 1;
3517                 }
3518                 for (i = 0; i < 30; i++) {
3519                         udelay(20);
3520                         tw32_f(MAC_STATUS,
3521                                (MAC_STATUS_SYNC_CHANGED |
3522                                 MAC_STATUS_CFG_CHANGED));
3523                         udelay(40);
3524                         if ((tr32(MAC_STATUS) &
3525                              (MAC_STATUS_SYNC_CHANGED |
3526                               MAC_STATUS_CFG_CHANGED)) == 0)
3527                                 break;
3528                 }
3529
3530                 mac_status = tr32(MAC_STATUS);
3531                 if (current_link_up == 0 &&
3532                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3533                     !(mac_status & MAC_STATUS_RCVD_CFG))
3534                         current_link_up = 1;
3535         } else {
3536                 tg3_setup_flow_control(tp, 0, 0);
3537
3538                 /* Forcing 1000FD link up. */
3539                 current_link_up = 1;
3540
3541                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3542                 udelay(40);
3543
3544                 tw32_f(MAC_MODE, tp->mac_mode);
3545                 udelay(40);
3546         }
3547
3548 out:
3549         return current_link_up;
3550 }
3551
/* Top-level link setup for TBI/fiber ports.  Dispatches to the hardware
 * (SG_DIG) or software autoneg path, then commits the resulting link
 * state: status block, LEDs, netif carrier, and a link report when
 * anything user-visible changed.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Remember current link parameters so we can report only real
         * changes at the end.
         */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: software autoneg, carrier already up, device fully
         * initialized, and the MAC reports a clean synced link — just
         * ack the change bits and keep the existing link.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Put the port into TBI (fiber) mode, full duplex. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the link-change bit in the status block so the next
         * interrupt reflects fresh state.
         */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack pending sync/config/link-state change bits until they
         * stay clear (bounded at 100 attempts).
         */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        /* Pulse SEND_CONFIGS to nudge the partner into
                         * renegotiating.
                         */
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Commit link parameters and drive the link LED accordingly. */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Propagate carrier state; report when the link toggled or when
         * speed/duplex/pause changed under an unchanged carrier.
         */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
3659
/* Link setup for fiber ports driven through an MII-style serdes PHY
 * (e.g. 5714S/5780S class hardware).  Programs the PHY for autoneg or a
 * forced configuration, then derives link/speed/duplex from BMSR/BMCR
 * and commits MAC mode and carrier state.
 *
 * Returns the OR of the tg3_readphy() error codes accumulated along the
 * way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;
        u32 local_adv, remote_adv;

        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        /* Ack all pending MAC status change bits before probing. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* BMSR is latched; read twice to get the current link state. */
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                /* On 5714, trust the MAC's TX status for link, not the
                 * PHY's BMSR bit.
                 */
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;

                /* Rebuild the 1000X advertisement word from the
                 * requested flow control and advertised modes.
                 */
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                  ADVERTISE_1000XPAUSE |
                                  ADVERTISE_1000XPSE_ASYM |
                                  ADVERTISE_SLCT);

                new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

                if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                        new_adv |= ADVERTISE_1000XHALF;
                if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                        new_adv |= ADVERTISE_1000XFULL;

                /* Only restart autoneg when the advertisement changed
                 * or autoneg is not yet enabled; then return early and
                 * let the serdes countdown poll for completion.
                 */
                if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                        return err;
                }
        } else {
                u32 new_bmcr;

                /* Forced mode: build the desired BMCR. */
                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                /* Withdraw all 1000X modes and briefly
                                 * restart autoneg so the partner drops
                                 * the link before we force our config.
                                 */
                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        /* Latched BMSR: read twice for fresh state. */
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
        }

        /* Derive link/speed/duplex from the (possibly adjusted) BMSR
         * and BMCR; resolve duplex from the common advertisement when
         * autoneg is on.
         */
        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                local_adv = 0;
                remote_adv = 0;

                if (bmcr & BMCR_ANENABLE) {
                        u32 common;

                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
                        }
                        else
                                /* No common 1000X mode: no usable link. */
                                current_link_up = 0;
                }
        }

        if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
                tg3_setup_flow_control(tp, local_adv, remote_adv);

        /* NOTE(review): the half-duplex decision below reads the
         * previous active_duplex; the freshly determined current_duplex
         * is only committed further down.  This ordering matches the
         * upstream driver — confirm it is intentional before changing.
         */
        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        /* Propagate carrier changes and report them. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}
3829
/* Detect a SERDES link brought up by parallel detection (link partner
 * not autonegotiating).  Runs periodically; only acts once the autoneg
 * grace period tracked by tp->serdes_counter has expired.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice — presumably the first read clears
			 * latched status bits; confirm against PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000 Mbps full duplex and disable
				 * autoneg in BMCR.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3887
/* Bring up / reconfigure the link for whatever PHY flavor this board
 * has (fiber SERDES, MII-attached SERDES, or copper), then adjust the
 * MAC-side settings that depend on the resulting link state.
 * Returns the error code from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	/* Dispatch on PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		/* 5784 A-step: re-derive the GRC prescaler from the
		 * current MAC clock rate reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000 Mbps half duplex uses a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* On pre-5705 chips, only run statistics coalescing while the
	 * link is up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: use the saved L1 entry threshold;
		 * link up: max out the threshold field.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3949
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* We should never get here if the reorder workaround is already
	 * active or mailbox writes already go through the indirect path.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; tg3_poll() notices this flag and
	 * schedules the reset task.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3970
3971 static inline u32 tg3_tx_avail(struct tg3 *tp)
3972 {
3973         smp_mb();
3974         return (tp->tx_pending -
3975                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3976 }
3977
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Walks the completed TX range, unmaps and frees each skb, advances
 * tx_cons, and wakes the queue if enough space freed up.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for an empty slot means hardware and
		 * driver disagree about ring state: start recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Skip the slots consumed by this skb's fragments,
		 * sanity-checking each one along the way.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		/* Re-check under the tx lock to close the race with a
		 * concurrent queue stop.
		 */
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
4036
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key: selects the std or jumbo posting ring.
 * @src_idx: if >= 0, slot whose skb pointer is cleared after the
 *	new buffer is committed.
 * @dest_idx_unmasked: destination slot (masked to ring size here).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve ring-specific descriptor, bookkeeping slot and
	 * buffer size from the opaque cookie.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
	/* NOTE(review): the mapping is not checked with
	 * pci_dma_mapping_error(); on platforms where DMA mapping can
	 * fail this would post a bad address to the chip — confirm.
	 */

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Publish the 64-bit DMA address into the hardware descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4108
4109 /* We only need to move over in the address because the other
4110  * members of the RX descriptor are invariant.  See notes above
4111  * tg3_alloc_rx_skb for full details.
4112  */
4113 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4114                            int src_idx, u32 dest_idx_unmasked)
4115 {
4116         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4117         struct ring_info *src_map, *dest_map;
4118         int dest_idx;
4119
4120         switch (opaque_key) {
4121         case RXD_OPAQUE_RING_STD:
4122                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4123                 dest_desc = &tp->rx_std[dest_idx];
4124                 dest_map = &tp->rx_std_buffers[dest_idx];
4125                 src_desc = &tp->rx_std[src_idx];
4126                 src_map = &tp->rx_std_buffers[src_idx];
4127                 break;
4128
4129         case RXD_OPAQUE_RING_JUMBO:
4130                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4131                 dest_desc = &tp->rx_jumbo[dest_idx];
4132                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4133                 src_desc = &tp->rx_jumbo[src_idx];
4134                 src_map = &tp->rx_jumbo_buffers[src_idx];
4135                 break;
4136
4137         default:
4138                 return;
4139         }
4140
4141         dest_map->skb = src_map->skb;
4142         pci_unmap_addr_set(dest_map, mapping,
4143                            pci_unmap_addr(src_map, mapping));
4144         dest_desc->addr_hi = src_desc->addr_hi;
4145         dest_desc->addr_lo = src_desc->addr_lo;
4146
4147         src_map->skb = NULL;
4148 }
4149
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4156
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * Receives up to @budget packets from the status ring; returns the
 * number actually delivered to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which posting ring and
		 * which slot within it this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (except the odd-nibble MII case). */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a replacement buffer and pass
			 * the original skb up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flags a
		 * TCP/UDP csum and the csum field reads 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Publish the std ring producer index to the chip every
		 * rx_std_max_post packets, mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
4335
/* One pass of NAPI work: service PHY/link-change events, reap TX
 * completions, and receive packets within the remaining budget.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping the
			 * status block marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib manages the PHY; just ack the
				 * MAC status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4377
/* NAPI poll method: loop doing work until the budget is exhausted or
 * no more work remains, then complete NAPI and re-arm interrupts via
 * tg3_restart_ints().  A pending TX recovery aborts the loop and
 * schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4418
/* Mark interrupt processing as disabled (tp->irq_sync) and wait for
 * any handler already running on another CPU to finish.  Called from
 * tg3_full_lock() with tp->lock held.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting for in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4428
/* Non-zero while tg3_irq_quiesce() has shut off interrupt handling;
 * the IRQ handlers check this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4433
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4445
/* Release the lock taken by tg3_full_lock().  Note this does not undo
 * an IRQ quiesce; tp->irq_sync is cleared separately (e.g. in
 * tg3_restart_hw()'s failure path).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4450
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while tg3_irq_quiesce() is in effect. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4467
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4492
/* Legacy INTx interrupt handler (non-tagged status blocks). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4541
/* Legacy INTx interrupt handler for chips using tagged status blocks:
 * new work is detected by comparing the status tag against last_tag.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4589
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* The test interrupt arrived if the status block was updated
	 * or INTA# is asserted: disable ints and report handled.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4604
4605 static int tg3_init_hw(struct tg3 *, int);
4606 static int tg3_halt(struct tg3 *, int, int);
4607
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns the tg3_init_hw() result.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Initialization failed: halt the chip and close the
		 * device.  tp->lock is dropped around dev_close() and
		 * re-taken before returning (see sparse annotations).
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4631
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' entry point (CONFIG_NET_POLL_CONTROLLER):
 * simply funnels into tg3_interrupt() with the device's IRQ number.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4640
/* Workqueue handler (scheduled from tg3_tx_timeout()) that halts and
 * re-initializes the chip in process context.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been brought down between scheduling the
	 * work and running it; nothing to do in that case.
	 */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the one-shot "restart the timer" request. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* Tx recovery: repoint the mailbox write methods (NOTE(review):
	 * presumably the flushing variants -- confirm against the
	 * definitions of tg3_write32_tx_mbox/tg3_write_flush_reg32)
	 * and record that mailbox write reordering must be handled.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4688
/* Log a minimal set of MAC/DMA status registers; called from the tx
 * timeout path to aid post-mortem debugging.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4696
/* net_device tx-timeout hook: optionally dump register state, then
 * defer the actual chip reset to the workqueue (tg3_reset_task).
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
4709
4710 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4711 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4712 {
4713         u32 base = (u32) mapping & 0xffffffff;
4714
4715         return ((base > 0xffffdcc0) &&
4716                 (base + len + 8 < base));
4717 }
4718
4719 /* Test for DMA addresses > 40-bit */
4720 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4721                                           int len)
4722 {
4723 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4724         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4725                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4726         return 0;
4727 #else
4728         return 0;
4729 #endif
4730 }
4731
4732 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4733
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * The offending skb is copied into a freshly allocated (linear) skb,
 * which replaces it in the tx ring.  @last_plus_one is one past the
 * last ring entry the original skb occupied; *@start is the first such
 * entry on input and is advanced past the replacement descriptor on
 * success.  Returns 0 on success, -1 if the packet had to be dropped.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	/* On 5701 the buffer must additionally be 4-byte aligned, so
	 * copy with enough extra headroom to realign the data.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Single descriptor for the whole linear copy:
			 * bit 0 marks it as the last of the frame.
			 */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries: the first slot owns the
	 * replacement skb (or NULL if we dropped), the rest are cleared.
	 */
	i = 0;
	while (entry != last_plus_one) {
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* The original skb's mapping and the skb itself are done with. */
	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4796
4797 static void tg3_set_txd(struct tg3 *tp, int entry,
4798                         dma_addr_t mapping, int len, u32 flags,
4799                         u32 mss_and_is_end)
4800 {
4801         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4802         int is_end = (mss_and_is_end & 0x1);
4803         u32 mss = (mss_and_is_end >> 1);
4804         u32 vlan_tag = 0;
4805
4806         if (is_end)
4807                 flags |= TXD_FLAG_END;
4808         if (flags & TXD_FLAG_VLAN) {
4809                 vlan_tag = flags >> 16;
4810                 flags &= 0xffff;
4811         }
4812         vlan_tag |= (mss << TXD_MSS_SHIFT);
4813
4814         txd->addr_hi = ((u64) mapping >> 32);
4815         txd->addr_lo = ((u64) mapping & 0xffffffff);
4816         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4817         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4818 }
4819
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* We must own the header before modifying it for TSO. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* The header length is folded into the upper bits of mss
		 * (<< 9 here, then << 1 more below via tg3_set_txd).
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Pre-cook the IP header for the hardware: zero
			 * checksum, per-segment total length.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware computes the TCP checksum for TSO frames. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	/* dma_maps[0] is the head; dma_maps[1..] cover the fragments. */
	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];
			tp->tx_buffers[entry].skb = NULL;

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check under the stopped queue to close the race
		 * with the reclaim path freeing entries concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4939
4940 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4941
4942 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4943  * TSO header is greater than 80 bytes.
4944  */
4945 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4946 {
4947         struct sk_buff *segs, *nskb;
4948
4949         /* Estimate the number of fragments in the worst case */
4950         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4951                 netif_stop_queue(tp->dev);
4952                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4953                         return NETDEV_TX_BUSY;
4954
4955                 netif_wake_queue(tp->dev);
4956         }
4957
4958         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4959         if (IS_ERR(segs))
4960                 goto tg3_tso_bug_end;
4961
4962         do {
4963                 nskb = segs;
4964                 segs = segs->next;
4965                 nskb->next = NULL;
4966                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4967         } while (segs);
4968
4969 tg3_tso_bug_end:
4970         dev_kfree_skb(skb);
4971
4972         return NETDEV_TX_OK;
4973 }
4974
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	int would_hit_hwbug;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* We must own the header before modifying it for TSO. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO chip bug on some
		 * parts; fall back to software GSO for those frames.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		/* HW TSO computes the TCP checksum itself; firmware TSO
		 * needs the pseudo-header checksum seeded instead.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* IP/TCP option words go in different descriptor fields
		 * depending on the TSO flavor the chip implements.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	/* dma_maps[0] is the head; dma_maps[1..] cover the fragments. */
	mapping = sp->dma_maps[0];

	tp->tx_buffers[entry].skb = skb;

	/* Track whether any buffer trips the 4GB-crossing or 40-bit
	 * address bugs; if so the whole frame is re-sent via the
	 * linear-copy workaround below.
	 */
	would_hit_hwbug = 0;

	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i + 1];

			tp->tx_buffers[entry].skb = NULL;

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first ring entry this skb consumed. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with the
		 * reclaim path freeing entries concurrently.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
5152
5153 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5154                                int new_mtu)
5155 {
5156         dev->mtu = new_mtu;
5157
5158         if (new_mtu > ETH_DATA_LEN) {
5159                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5160                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5161                         ethtool_op_set_tso(dev, 0);
5162                 }
5163                 else
5164                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5165         } else {
5166                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5167                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5168                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5169         }
5170 }
5171
5172 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5173 {
5174         struct tg3 *tp = netdev_priv(dev);
5175         int err;
5176
5177         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5178                 return -EINVAL;
5179
5180         if (!netif_running(dev)) {
5181                 /* We'll just catch it later when the
5182                  * device is up'd.
5183                  */
5184                 tg3_set_mtu(dev, tp, new_mtu);
5185                 return 0;
5186         }
5187
5188         tg3_phy_stop(tp);
5189
5190         tg3_netif_stop(tp);
5191
5192         tg3_full_lock(tp, 1);
5193
5194         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5195
5196         tg3_set_mtu(dev, tp, new_mtu);
5197
5198         err = tg3_restart_hw(tp, 0);
5199
5200         if (!err)
5201                 tg3_netif_start(tp);
5202
5203         tg3_full_unlock(tp);
5204
5205         if (!err)
5206                 tg3_phy_start(tp);
5207
5208         return err;
5209 }
5210
5211 /* Free up pending packets in all rx/tx rings.
5212  *
5213  * The chip has been shut down and the driver detached from
5214  * the networking, so no interrupts or new tx packets will
5215  * end up in the driver.  tp->{tx,}lock is not held and we are not
5216  * in an interrupt context and thus may sleep.
5217  */
5218 static void tg3_free_rings(struct tg3 *tp)
5219 {
5220         struct ring_info *rxp;
5221         int i;
5222
5223         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5224                 rxp = &tp->rx_std_buffers[i];
5225
5226                 if (rxp->skb == NULL)
5227                         continue;
5228                 pci_unmap_single(tp->pdev,
5229                                  pci_unmap_addr(rxp, mapping),
5230                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5231                                  PCI_DMA_FROMDEVICE);
5232                 dev_kfree_skb_any(rxp->skb);
5233                 rxp->skb = NULL;
5234         }
5235
5236         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5237                 rxp = &tp->rx_jumbo_buffers[i];
5238
5239                 if (rxp->skb == NULL)
5240                         continue;
5241                 pci_unmap_single(tp->pdev,
5242                                  pci_unmap_addr(rxp, mapping),
5243                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5244                                  PCI_DMA_FROMDEVICE);
5245                 dev_kfree_skb_any(rxp->skb);
5246                 rxp->skb = NULL;
5247         }
5248
5249         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5250                 struct tx_ring_info *txp;
5251                 struct sk_buff *skb;
5252
5253                 txp = &tp->tx_buffers[i];
5254                 skb = txp->skb;
5255
5256                 if (skb == NULL) {
5257                         i++;
5258                         continue;
5259                 }
5260
5261                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5262
5263                 txp->skb = NULL;
5264
5265                 i += skb_shinfo(skb)->nr_frags + 1;
5266
5267                 dev_kfree_skb_any(skb);
5268         }
5269 }
5270
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one rx buffer could be
 * allocated for a required ring.  A partially populated ring shrinks
 * tp->rx_pending / tp->rx_jumbo_pending instead of failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips carry jumbo frames in the standard ring,
	 * so size its buffers accordingly when the MTU is large.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
5360
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
/* Release everything obtained by tg3_alloc_consistent().  Each pointer
 * is checked and NULLed individually, so this is safe to call with any
 * subset allocated -- which is exactly how the allocation error path
 * uses it.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* One kzalloc backs all three software ring-info arrays. */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
5400
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
5405 static int tg3_alloc_consistent(struct tg3 *tp)
5406 {
5407         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5408                                       (TG3_RX_RING_SIZE +
5409                                        TG3_RX_JUMBO_RING_SIZE)) +
5410                                      (sizeof(struct tx_ring_info) *
5411                                       TG3_TX_RING_SIZE),
5412                                      GFP_KERNEL);
5413         if (!tp->rx_std_buffers)
5414                 return -ENOMEM;
5415
5416         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5417         tp->tx_buffers = (struct tx_ring_info *)
5418                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5419
5420         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5421                                           &tp->rx_std_mapping);
5422         if (!tp->rx_std)
5423                 goto err_out;
5424
5425         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5426                                             &tp->rx_jumbo_mapping);
5427
5428         if (!tp->rx_jumbo)
5429                 goto err_out;
5430
5431         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5432                                           &tp->rx_rcb_mapping);
5433         if (!tp->rx_rcb)
5434                 goto err_out;
5435
5436         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5437                                            &tp->tx_desc_mapping);
5438         if (!tp->tx_ring)
5439                 goto err_out;
5440
5441         tp->hw_status = pci_alloc_consistent(tp->pdev,
5442                                              TG3_HW_STATUS_SIZE,
5443                                              &tp->status_mapping);
5444         if (!tp->hw_status)
5445                 goto err_out;
5446
5447         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5448                                             sizeof(struct tg3_hw_stats),
5449                                             &tp->stats_mapping);
5450         if (!tp->hw_stats)
5451                 goto err_out;
5452
5453         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5454         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5455
5456         return 0;
5457
5458 err_out:
5459         tg3_free_consistent(tp);
5460         return -ENOMEM;
5461 }
5462
/* Number of 100us polling iterations (~100ms total) used when waiting
 * for a hardware state machine to stop.
 */
#define MAX_WAIT_CNT 1000
5464
5465 /* To stop a block, clear the enable bit and poll till it
5466  * clears.  tp->lock is held.
5467  */
5468 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5469 {
5470         unsigned int i;
5471         u32 val;
5472
5473         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5474                 switch (ofs) {
5475                 case RCVLSC_MODE:
5476                 case DMAC_MODE:
5477                 case MBFREE_MODE:
5478                 case BUFMGR_MODE:
5479                 case MEMARB_MODE:
5480                         /* We can't enable/disable these bits of the
5481                          * 5705/5750, just say success.
5482                          */
5483                         return 0;
5484
5485                 default:
5486                         break;
5487                 }
5488         }
5489
5490         val = tr32(ofs);
5491         val &= ~enable_bit;
5492         tw32_f(ofs, val);
5493
5494         for (i = 0; i < MAX_WAIT_CNT; i++) {
5495                 udelay(100);
5496                 val = tr32(ofs);
5497                 if ((val & enable_bit) == 0)
5498                         break;
5499         }
5500
5501         if (i == MAX_WAIT_CNT && !silent) {
5502                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5503                        "ofs=%lx enable_bit=%x\n",
5504                        ofs, enable_bit);
5505                 return -ENODEV;
5506         }
5507
5508         return 0;
5509 }
5510
/* tp->lock is held.
 * Quiesce the chip: stop all MAC/DMA state machines so the hardware
 * stops touching host memory.  Failures of the individual block stops
 * are OR-ed into err; @silent suppresses their timeout messages.
 * Returns 0 on success, negative on any timeout.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Disable the receive MAC first so no new frames enter. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side state machines. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the send-side state machines and DMA engines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and poll (up to ~100ms) for the
	 * enable bit to clear.  Note this timeout is always logged,
	 * even with @silent.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse FTQ_RESET: assert all bits, then deassert. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host-visible status and statistics blocks. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5573
5574 /* tp->lock is held. */
5575 static int tg3_nvram_lock(struct tg3 *tp)
5576 {
5577         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5578                 int i;
5579
5580                 if (tp->nvram_lock_cnt == 0) {
5581                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5582                         for (i = 0; i < 8000; i++) {
5583                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5584                                         break;
5585                                 udelay(20);
5586                         }
5587                         if (i == 8000) {
5588                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5589                                 return -ENODEV;
5590                         }
5591                 }
5592                 tp->nvram_lock_cnt++;
5593         }
5594         return 0;
5595 }
5596
5597 /* tp->lock is held. */
5598 static void tg3_nvram_unlock(struct tg3 *tp)
5599 {
5600         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5601                 if (tp->nvram_lock_cnt > 0)
5602                         tp->nvram_lock_cnt--;
5603                 if (tp->nvram_lock_cnt == 0)
5604                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5605         }
5606 }
5607
5608 /* tp->lock is held. */
5609 static void tg3_enable_nvram_access(struct tg3 *tp)
5610 {
5611         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5612             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5613                 u32 nvaccess = tr32(NVRAM_ACCESS);
5614
5615                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5616         }
5617 }
5618
5619 /* tp->lock is held. */
5620 static void tg3_disable_nvram_access(struct tg3 *tp)
5621 {
5622         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5623             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5624                 u32 nvaccess = tr32(NVRAM_ACCESS);
5625
5626                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5627         }
5628 }
5629
/* Post an event to the APE firmware via its shared-memory mailbox.
 * Silently does nothing if the APE segment signature or ready flag
 * is absent, or if the memory lock cannot be taken.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail if the APE shared-memory segment signature is missing. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event consumed: queue ours while still
		 * holding the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if our event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5665
5666 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5667 {
5668         u32 event;
5669         u32 apedata;
5670
5671         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5672                 return;
5673
5674         switch (kind) {
5675                 case RESET_KIND_INIT:
5676                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5677                                         APE_HOST_SEG_SIG_MAGIC);
5678                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5679                                         APE_HOST_SEG_LEN_MAGIC);
5680                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5681                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5682                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5683                                         APE_HOST_DRIVER_ID_MAGIC);
5684                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5685                                         APE_HOST_BEHAV_NO_PHYLOCK);
5686
5687                         event = APE_EVENT_STATUS_STATE_START;
5688                         break;
5689                 case RESET_KIND_SHUTDOWN:
5690                         /* With the interface we are currently using,
5691                          * APE does not track driver state.  Wiping
5692                          * out the HOST SEGMENT SIGNATURE forces
5693                          * the APE to assume OS absent status.
5694                          */
5695                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5696
5697                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5698                         break;
5699                 case RESET_KIND_SUSPEND:
5700                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5701                         break;
5702                 default:
5703                         return;
5704         }
5705
5706         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5707
5708         tg3_ape_send_event(tp, event);
5709 }
5710
5711 /* tp->lock is held. */
5712 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5713 {
5714         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5715                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5716
5717         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5718                 switch (kind) {
5719                 case RESET_KIND_INIT:
5720                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5721                                       DRV_STATE_START);
5722                         break;
5723
5724                 case RESET_KIND_SHUTDOWN:
5725                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5726                                       DRV_STATE_UNLOAD);
5727                         break;
5728
5729                 case RESET_KIND_SUSPEND:
5730                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5731                                       DRV_STATE_SUSPEND);
5732                         break;
5733
5734                 default:
5735                         break;
5736                 }
5737         }
5738
5739         if (kind == RESET_KIND_INIT ||
5740             kind == RESET_KIND_SUSPEND)
5741                 tg3_ape_driver_state_change(tp, kind);
5742 }
5743
5744 /* tp->lock is held. */
5745 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5746 {
5747         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5748                 switch (kind) {
5749                 case RESET_KIND_INIT:
5750                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5751                                       DRV_STATE_START_DONE);
5752                         break;
5753
5754                 case RESET_KIND_SHUTDOWN:
5755                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5756                                       DRV_STATE_UNLOAD_DONE);
5757                         break;
5758
5759                 default:
5760                         break;
5761                 }
5762         }
5763
5764         if (kind == RESET_KIND_SHUTDOWN)
5765                 tg3_ape_driver_state_change(tp, kind);
5766 }
5767
5768 /* tp->lock is held. */
5769 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5770 {
5771         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5772                 switch (kind) {
5773                 case RESET_KIND_INIT:
5774                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5775                                       DRV_STATE_START);
5776                         break;
5777
5778                 case RESET_KIND_SHUTDOWN:
5779                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5780                                       DRV_STATE_UNLOAD);
5781                         break;
5782
5783                 case RESET_KIND_SUSPEND:
5784                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5785                                       DRV_STATE_SUSPEND);
5786                         break;
5787
5788                 default:
5789                         break;
5790                 }
5791         }
5792 }
5793
5794 static int tg3_poll_fw(struct tg3 *tp)
5795 {
5796         int i;
5797         u32 val;
5798
5799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5800                 /* Wait up to 20ms for init done. */
5801                 for (i = 0; i < 200; i++) {
5802                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5803                                 return 0;
5804                         udelay(100);
5805                 }
5806                 return -ENODEV;
5807         }
5808
5809         /* Wait for firmware initialization to complete. */
5810         for (i = 0; i < 100000; i++) {
5811                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5812                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5813                         break;
5814                 udelay(10);
5815         }
5816
5817         /* Chip might not be fitted with firmware.  Some Sun onboard
5818          * parts are configured like that.  So don't signal the timeout
5819          * of the above loop as an error, but do report the lack of
5820          * running firmware once.
5821          */
5822         if (i >= 100000 &&
5823             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5824                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5825
5826                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5827                        tp->dev->name);
5828         }
5829
5830         return 0;
5831 }
5832
/* Save the PCI command register before chip reset; the GRC core-clock
 * reset can clear bits in it (see tg3_chip_reset()), and it is written
 * back by tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5838
/* Restore PCI state after chip reset.  The write order below is
 * deliberate: indirect access must come back first so the register
 * accessors work, then PCISTATE, then the saved command word.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Reinstate the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Restore read-request size (PCIe) or cacheline/latency
	 * timer (conventional PCI/PCI-X); not needed on the 5785.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
			pcie_set_readrq(tp->pdev, 4096);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5902
5903 static void tg3_stop_fw(struct tg3 *);
5904
/* tp->lock is held.
 * Perform a GRC core-clock reset of the chip and bring the basics
 * (memory arbiter, GRC mode, MAC mode, PCI state) back up.  The
 * exact statement ordering here is load-bearing; see the inline
 * comments.  Returns 0 or a negative value from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe workaround registers/bits inherited from vendor
		 * code; exact semantics unknown.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and let it run. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			/* Set PCIE max payload size and clear error status.  */
			pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Safe for the irq handler to touch the chip again. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter, preserving the prior mode bits
	 * on 5780-class chips.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): 0x5000 write is an undocumented
		 * 5750 A3 workaround from vendor code.
		 */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-establish the MAC port mode; for APE only the APE TX/RX
	 * enables are preserved across the reset.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
6109
6110 /* tp->lock is held. */
6111 static void tg3_stop_fw(struct tg3 *tp)
6112 {
6113         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6114            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6115                 /* Wait for RX cpu to ACK the previous event. */
6116                 tg3_wait_for_event_ack(tp);
6117
6118                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6119
6120                 tg3_generate_fw_event(tp);
6121
6122                 /* Wait for RX cpu to ACK this event. */
6123                 tg3_wait_for_event_ack(tp);
6124         }
6125 }
6126
/* tp->lock is held.
 * Full stop: pause firmware, quiesce the hardware, reset the chip,
 * and publish the reset @kind via the signature mailboxes.
 * Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
6147
/* Layout of the internal-CPU firmware image loaded below; addresses
 * are in the on-chip CPU address space.
 * NOTE(review): TG3_FW_RELASE_MINOR is misspelled ("RELASE"); left
 * unchanged because code outside this view may reference the name.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
6162
6163 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6164         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6165         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6166         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6167         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6168         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6169         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6170         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6171         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6172         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6173         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6174         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6175         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6176         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6177         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6178         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6179         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6180         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6181         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6182         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6183         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6184         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6185         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6186         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6187         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6188         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6189         0, 0, 0, 0, 0, 0,
6190         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6191         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6192         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6193         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6194         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6195         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6196         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6197         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6198         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6199         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6201         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6202         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6203         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6204         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6205         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6206         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6207         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6208         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6209         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6210         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6211         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6212         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6213         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6214         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6215         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6216         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6217         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6218         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6219         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6220         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6221         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6222         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6223         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6224         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6225         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6226         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6227         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6228         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6229         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6230         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6231         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6232         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6233         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6234         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6235         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6236         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6237         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6238         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6239         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6240         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6241         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6242         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6243         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6244         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6245         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6246         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6247         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6248         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6249         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6250         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6251         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6252         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6253         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6254         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6255 };
6256
/* Read-only data section of the 5701 A0 workaround firmware (loaded by
 * tg3_load_5701_a0_firmware_fix).  The words appear to encode ASCII
 * event/error message strings used by the firmware itself — TODO confirm
 * against the firmware source; treat as an opaque image.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
6264
#if 0 /* All zeros, don't eat up space with it. */
/* .data section of the 5701 A0 workaround firmware.  It is entirely zero,
 * so the loader is given a NULL data pointer instead and zero-fills the
 * region (see tg3_load_5701_a0_firmware_fix / tg3_load_firmware_cpu).
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
6271
6272 #define RX_CPU_SCRATCH_BASE     0x30000
6273 #define RX_CPU_SCRATCH_SIZE     0x04000
6274 #define TX_CPU_SCRATCH_BASE     0x34000
6275 #define TX_CPU_SCRATCH_SIZE     0x04000
6276
6277 /* tp->lock is held. */
6278 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6279 {
6280         int i;
6281
6282         BUG_ON(offset == TX_CPU_BASE &&
6283             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6284
6285         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6286                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6287
6288                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6289                 return 0;
6290         }
6291         if (offset == RX_CPU_BASE) {
6292                 for (i = 0; i < 10000; i++) {
6293                         tw32(offset + CPU_STATE, 0xffffffff);
6294                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6295                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6296                                 break;
6297                 }
6298
6299                 tw32(offset + CPU_STATE, 0xffffffff);
6300                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6301                 udelay(10);
6302         } else {
6303                 for (i = 0; i < 10000; i++) {
6304                         tw32(offset + CPU_STATE, 0xffffffff);
6305                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6306                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6307                                 break;
6308                 }
6309         }
6310
6311         if (i >= 10000) {
6312                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6313                        "and %s CPU\n",
6314                        tp->dev->name,
6315                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6316                 return -ENODEV;
6317         }
6318
6319         /* Clear firmware's nvram arbitration. */
6320         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6321                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6322         return 0;
6323 }
6324
/* Describes one firmware image to be loaded into cpu scratch memory by
 * tg3_load_firmware_cpu.  For each section, *_base is the section's
 * address in the cpu's address space (only the low 16 bits are used as
 * a scratch offset), *_len is its length in bytes, and *_data points to
 * the image words; a NULL *_data pointer makes the loader zero-fill the
 * section instead.
 */
struct fw_info {
	unsigned int text_base;
	unsigned int text_len;
	const u32 *text_data;
	unsigned int rodata_base;
	unsigned int rodata_len;
	const u32 *rodata_data;
	unsigned int data_base;
	unsigned int data_len;
	const u32 *data_data;
};
6336
6337 /* tp->lock is held. */
6338 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6339                                  int cpu_scratch_size, struct fw_info *info)
6340 {
6341         int err, lock_err, i;
6342         void (*write_op)(struct tg3 *, u32, u32);
6343
6344         if (cpu_base == TX_CPU_BASE &&
6345             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6346                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6347                        "TX cpu firmware on %s which is 5705.\n",
6348                        tp->dev->name);
6349                 return -EINVAL;
6350         }
6351
6352         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6353                 write_op = tg3_write_mem;
6354         else
6355                 write_op = tg3_write_indirect_reg32;
6356
6357         /* It is possible that bootcode is still loading at this point.
6358          * Get the nvram lock first before halting the cpu.
6359          */
6360         lock_err = tg3_nvram_lock(tp);
6361         err = tg3_halt_cpu(tp, cpu_base);
6362         if (!lock_err)
6363                 tg3_nvram_unlock(tp);
6364         if (err)
6365                 goto out;
6366
6367         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6368                 write_op(tp, cpu_scratch_base + i, 0);
6369         tw32(cpu_base + CPU_STATE, 0xffffffff);
6370         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6371         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6372                 write_op(tp, (cpu_scratch_base +
6373                               (info->text_base & 0xffff) +
6374                               (i * sizeof(u32))),
6375                          (info->text_data ?
6376                           info->text_data[i] : 0));
6377         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6378                 write_op(tp, (cpu_scratch_base +
6379                               (info->rodata_base & 0xffff) +
6380                               (i * sizeof(u32))),
6381                          (info->rodata_data ?
6382                           info->rodata_data[i] : 0));
6383         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6384                 write_op(tp, (cpu_scratch_base +
6385                               (info->data_base & 0xffff) +
6386                               (i * sizeof(u32))),
6387                          (info->data_data ?
6388                           info->data_data[i] : 0));
6389
6390         err = 0;
6391
6392 out:
6393         return err;
6394 }
6395
6396 /* tp->lock is held. */
6397 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6398 {
6399         struct fw_info info;
6400         int err, i;
6401
6402         info.text_base = TG3_FW_TEXT_ADDR;
6403         info.text_len = TG3_FW_TEXT_LEN;
6404         info.text_data = &tg3FwText[0];
6405         info.rodata_base = TG3_FW_RODATA_ADDR;
6406         info.rodata_len = TG3_FW_RODATA_LEN;
6407         info.rodata_data = &tg3FwRodata[0];
6408         info.data_base = TG3_FW_DATA_ADDR;
6409         info.data_len = TG3_FW_DATA_LEN;
6410         info.data_data = NULL;
6411
6412         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6413                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6414                                     &info);
6415         if (err)
6416                 return err;
6417
6418         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6419                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6420                                     &info);
6421         if (err)
6422                 return err;
6423
6424         /* Now startup only the RX cpu. */
6425         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6426         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6427
6428         for (i = 0; i < 5; i++) {
6429                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6430                         break;
6431                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6432                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6433                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6434                 udelay(1000);
6435         }
6436         if (i >= 5) {
6437                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6438                        "to set RX CPU PC, is %08x should be %08x\n",
6439                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6440                        TG3_FW_TEXT_ADDR);
6441                 return -ENODEV;
6442         }
6443         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6444         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6445
6446         return 0;
6447 }
6448
6449
6450 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6451 #define TG3_TSO_FW_RELASE_MINOR         0x6
6452 #define TG3_TSO_FW_RELEASE_FIX          0x0
6453 #define TG3_TSO_FW_START_ADDR           0x08000000
6454 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6455 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6456 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6457 #define TG3_TSO_FW_RODATA_LEN           0x60
6458 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6459 #define TG3_TSO_FW_DATA_LEN             0x30
6460 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6461 #define TG3_TSO_FW_SBSS_LEN             0x2c
6462 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6463 #define TG3_TSO_FW_BSS_LEN              0x894
6464
6465 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6466         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6467         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6468         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6469         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6470         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6471         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6472         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6473         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6474         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6475         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6476         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6477         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6478         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6479         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6480         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6481         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6482         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6483         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6484         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6485         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6486         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6487         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6488         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6489         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6490         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6491         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6492         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6493         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6494         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6495         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6496         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6497         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6498         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6499         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6500         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6501         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6502         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6503         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6504         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6505         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6506         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6507         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6508         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6509         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6510         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6511         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6512         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6513         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6514         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6515         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6516         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6517         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6518         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6519         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6520         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6521         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6522         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6523         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6524         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6525         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6526         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6527         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6528         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6529         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6530         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6531         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6532         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6533         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6534         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6535         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6536         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6537         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6538         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6539         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6540         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6541         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6542         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6543         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6544         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6545         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6546         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6547         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6548         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6549         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6550         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6551         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6552         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6553         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6554         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6555         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6556         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6557         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6558         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6559         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6560         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6561         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6562         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6563         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6564         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6565         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6566         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6567         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6568         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6569         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6570         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6571         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6572         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6573         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6574         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6575         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6576         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6577         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6578         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6579         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6580         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6581         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6582         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6583         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6584         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6585         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6586         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6587         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6588         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6589         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6590         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6591         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6592         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6593         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6594         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6595         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6596         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6597         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6598         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6599         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6600         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6601         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6602         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6603         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6604         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6605         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6606         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6607         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6608         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6609         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6610         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6611         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6612         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6613         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6614         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6615         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6616         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6617         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6618         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6619         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6620         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6621         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6622         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6623         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6624         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6625         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6626         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6627         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6628         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6629         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6630         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6631         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6632         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6633         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6634         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6635         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6636         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6637         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6638         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6639         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6640         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6641         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6642         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6643         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6644         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6645         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6646         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6647         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6648         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6649         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6650         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6651         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6652         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6653         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6654         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6655         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6656         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6657         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6658         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6659         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6660         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6661         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6662         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6663         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6664         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6665         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6666         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6667         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6668         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6669         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6670         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6671         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6672         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6673         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6674         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6675         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6676         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6677         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6678         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6679         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6680         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6681         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6682         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6683         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6684         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6685         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6686         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6687         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6688         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6689         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6690         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6691         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6692         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6693         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6694         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6695         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6696         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6697         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6698         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6699         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6700         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6701         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6702         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6703         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6704         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6705         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6706         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6707         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6708         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6709         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6710         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6711         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6712         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6713         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6714         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6715         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6716         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6717         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6718         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6719         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6720         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6721         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6722         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6723         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6724         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6725         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6726         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6727         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6728         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6729         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6730         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6731         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6732         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6733         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6734         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6735         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6736         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6737         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6738         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6739         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6740         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6741         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6742         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6743         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6744         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6745         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6746         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6747         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6748         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6749         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6750 };
6751
/* TSO firmware read-only data segment.  The words are big-endian ASCII
 * tags used by the firmware internally (e.g. 0x4d61696e 0x43707542 =
 * "MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**", "SwEvent0",
 * "fatalErr").  Generated data — do not edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6759
/* TSO firmware initialized-data segment.  Contains the ASCII version
 * string "stkoffld_v1.6.0" (big-endian words), followed by zero padding.
 * Generated data — do not edit by hand.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6765
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* sic: historical misspelling of "RELEASE"; name kept as-is */
6769 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6770 #define TG3_TSO5_FW_START_ADDR          0x00010000
6771 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6772 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6773 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6774 #define TG3_TSO5_FW_RODATA_LEN          0x50
6775 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6776 #define TG3_TSO5_FW_DATA_LEN            0x20
6777 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6778 #define TG3_TSO5_FW_SBSS_LEN            0x28
6779 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6780 #define TG3_TSO5_FW_BSS_LEN             0x88
6781
/* TSO firmware .text segment for 5705-class chips: MIPS machine code,
 * one instruction per 32-bit word, loaded into NIC SRAM at
 * TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware().  Sized from
 * TG3_TSO5_FW_TEXT_LEN plus one padding word.  Generated firmware
 * dump — do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6940
/* 5705 TSO firmware read-only data segment.  Big-endian ASCII tags used
 * by the firmware ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 * Generated data — do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6947
/* 5705 TSO firmware initialized-data segment.  Contains the ASCII
 * version string "stkoffld_v1.2.0" (big-endian words) plus padding.
 * Generated data — do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6952
6953 /* tp->lock is held. */
6954 static int tg3_load_tso_firmware(struct tg3 *tp)
6955 {
6956         struct fw_info info;
6957         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6958         int err, i;
6959
6960         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6961                 return 0;
6962
6963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6964                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6965                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6966                 info.text_data = &tg3Tso5FwText[0];
6967                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6968                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6969                 info.rodata_data = &tg3Tso5FwRodata[0];
6970                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6971                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6972                 info.data_data = &tg3Tso5FwData[0];
6973                 cpu_base = RX_CPU_BASE;
6974                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6975                 cpu_scratch_size = (info.text_len +
6976                                     info.rodata_len +
6977                                     info.data_len +
6978                                     TG3_TSO5_FW_SBSS_LEN +
6979                                     TG3_TSO5_FW_BSS_LEN);
6980         } else {
6981                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6982                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6983                 info.text_data = &tg3TsoFwText[0];
6984                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6985                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6986                 info.rodata_data = &tg3TsoFwRodata[0];
6987                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6988                 info.data_len = TG3_TSO_FW_DATA_LEN;
6989                 info.data_data = &tg3TsoFwData[0];
6990                 cpu_base = TX_CPU_BASE;
6991                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6992                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6993         }
6994
6995         err = tg3_load_firmware_cpu(tp, cpu_base,
6996                                     cpu_scratch_base, cpu_scratch_size,
6997                                     &info);
6998         if (err)
6999                 return err;
7000
7001         /* Now startup the cpu. */
7002         tw32(cpu_base + CPU_STATE, 0xffffffff);
7003         tw32_f(cpu_base + CPU_PC,    info.text_base);
7004
7005         for (i = 0; i < 5; i++) {
7006                 if (tr32(cpu_base + CPU_PC) == info.text_base)
7007                         break;
7008                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7009                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7010                 tw32_f(cpu_base + CPU_PC,    info.text_base);
7011                 udelay(1000);
7012         }
7013         if (i >= 5) {
7014                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7015                        "to set CPU PC, is %08x should be %08x\n",
7016                        tp->dev->name, tr32(cpu_base + CPU_PC),
7017                        info.text_base);
7018                 return -ENODEV;
7019         }
7020         tw32(cpu_base + CPU_STATE, 0xffffffff);
7021         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7022         return 0;
7023 }
7024
7025
7026 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7027 {
7028         struct tg3 *tp = netdev_priv(dev);
7029         struct sockaddr *addr = p;
7030         int err = 0, skip_mac_1 = 0;
7031
7032         if (!is_valid_ether_addr(addr->sa_data))
7033                 return -EINVAL;
7034
7035         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7036
7037         if (!netif_running(dev))
7038                 return 0;
7039
7040         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7041                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7042
7043                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7044                 addr0_low = tr32(MAC_ADDR_0_LOW);
7045                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7046                 addr1_low = tr32(MAC_ADDR_1_LOW);
7047
7048                 /* Skip MAC addr 1 if ASF is using it. */
7049                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7050                     !(addr1_high == 0 && addr1_low == 0))
7051                         skip_mac_1 = 1;
7052         }
7053         spin_lock_bh(&tp->lock);
7054         __tg3_set_mac_addr(tp, skip_mac_1);
7055         spin_unlock_bh(&tp->lock);
7056
7057         return err;
7058 }
7059
7060 /* tp->lock is held. */
7061 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7062                            dma_addr_t mapping, u32 maxlen_flags,
7063                            u32 nic_addr)
7064 {
7065         tg3_write_mem(tp,
7066                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7067                       ((u64) mapping >> 32));
7068         tg3_write_mem(tp,
7069                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7070                       ((u64) mapping & 0xffffffff));
7071         tg3_write_mem(tp,
7072                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7073                        maxlen_flags);
7074
7075         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7076                 tg3_write_mem(tp,
7077                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7078                               nic_addr);
7079 }
7080
7081 static void __tg3_set_rx_mode(struct net_device *);
7082 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7083 {
7084         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7085         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7086         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7087         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7088         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7089                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7090                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7091         }
7092         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7093         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7094         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7095                 u32 val = ec->stats_block_coalesce_usecs;
7096
7097                 if (!netif_carrier_ok(tp->dev))
7098                         val = 0;
7099
7100                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7101         }
7102 }
7103
7104 /* tp->lock is held. */
7105 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7106 {
7107         u32 val, rdmac_mode;
7108         int i, err, limit;
7109
7110         tg3_disable_ints(tp);
7111
7112         tg3_stop_fw(tp);
7113
7114         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7115
7116         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7117                 tg3_abort_hw(tp, 1);
7118         }
7119
7120         if (reset_phy &&
7121             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7122                 tg3_phy_reset(tp);
7123
7124         err = tg3_chip_reset(tp);
7125         if (err)
7126                 return err;
7127
7128         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7129
7130         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7131                 val = tr32(TG3_CPMU_CTRL);
7132                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7133                 tw32(TG3_CPMU_CTRL, val);
7134
7135                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7136                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7137                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7138                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7139
7140                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7141                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7142                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7143                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7144
7145                 val = tr32(TG3_CPMU_HST_ACC);
7146                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7147                 val |= CPMU_HST_ACC_MACCLK_6_25;
7148                 tw32(TG3_CPMU_HST_ACC, val);
7149         }
7150
7151         /* This works around an issue with Athlon chipsets on
7152          * B3 tigon3 silicon.  This bit has no effect on any
7153          * other revision.  But do not set this on PCI Express
7154          * chips and don't even touch the clocks if the CPMU is present.
7155          */
7156         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7157                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7158                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7159                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7160         }
7161
7162         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7163             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7164                 val = tr32(TG3PCI_PCISTATE);
7165                 val |= PCISTATE_RETRY_SAME_DMA;
7166                 tw32(TG3PCI_PCISTATE, val);
7167         }
7168
7169         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7170                 /* Allow reads and writes to the
7171                  * APE register and memory space.
7172                  */
7173                 val = tr32(TG3PCI_PCISTATE);
7174                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7175                        PCISTATE_ALLOW_APE_SHMEM_WR;
7176                 tw32(TG3PCI_PCISTATE, val);
7177         }
7178
7179         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7180                 /* Enable some hw fixes.  */
7181                 val = tr32(TG3PCI_MSI_DATA);
7182                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7183                 tw32(TG3PCI_MSI_DATA, val);
7184         }
7185
7186         /* Descriptor ring init may make accesses to the
7187          * NIC SRAM area to setup the TX descriptors, so we
7188          * can only do this after the hardware has been
7189          * successfully reset.
7190          */
7191         err = tg3_init_rings(tp);
7192         if (err)
7193                 return err;
7194
7195         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7196             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7197                 /* This value is determined during the probe time DMA
7198                  * engine test, tg3_test_dma.
7199                  */
7200                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7201         }
7202
7203         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7204                           GRC_MODE_4X_NIC_SEND_RINGS |
7205                           GRC_MODE_NO_TX_PHDR_CSUM |
7206                           GRC_MODE_NO_RX_PHDR_CSUM);
7207         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7208
7209         /* Pseudo-header checksum is done by hardware logic and not
7210          * the offload processers, so make the chip do the pseudo-
7211          * header checksums on receive.  For transmit it is more
7212          * convenient to do the pseudo-header checksum in software
7213          * as Linux does that on transmit for us in all cases.
7214          */
7215         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7216
7217         tw32(GRC_MODE,
7218              tp->grc_mode |
7219              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7220
7221         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7222         val = tr32(GRC_MISC_CFG);
7223         val &= ~0xff;
7224         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7225         tw32(GRC_MISC_CFG, val);
7226
7227         /* Initialize MBUF/DESC pool. */
7228         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7229                 /* Do nothing.  */
7230         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7231                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7232                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7233                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7234                 else
7235                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7236                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7237                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7238         }
7239         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7240                 int fw_len;
7241
7242                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7243                           TG3_TSO5_FW_RODATA_LEN +
7244                           TG3_TSO5_FW_DATA_LEN +
7245                           TG3_TSO5_FW_SBSS_LEN +
7246                           TG3_TSO5_FW_BSS_LEN);
7247                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7248                 tw32(BUFMGR_MB_POOL_ADDR,
7249                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7250                 tw32(BUFMGR_MB_POOL_SIZE,
7251                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7252         }
7253
7254         if (tp->dev->mtu <= ETH_DATA_LEN) {
7255                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7256                      tp->bufmgr_config.mbuf_read_dma_low_water);
7257                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7258                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7259                 tw32(BUFMGR_MB_HIGH_WATER,
7260                      tp->bufmgr_config.mbuf_high_water);
7261         } else {
7262                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7263                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7264                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7265                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7266                 tw32(BUFMGR_MB_HIGH_WATER,
7267                      tp->bufmgr_config.mbuf_high_water_jumbo);
7268         }
7269         tw32(BUFMGR_DMA_LOW_WATER,
7270              tp->bufmgr_config.dma_low_water);
7271         tw32(BUFMGR_DMA_HIGH_WATER,
7272              tp->bufmgr_config.dma_high_water);
7273
7274         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7275         for (i = 0; i < 2000; i++) {
7276                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7277                         break;
7278                 udelay(10);
7279         }
7280         if (i >= 2000) {
7281                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7282                        tp->dev->name);
7283                 return -ENODEV;
7284         }
7285
7286         /* Setup replenish threshold. */
7287         val = tp->rx_pending / 8;
7288         if (val == 0)
7289                 val = 1;
7290         else if (val > tp->rx_std_max_post)
7291                 val = tp->rx_std_max_post;
7292         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7293                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7294                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7295
7296                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7297                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7298         }
7299
7300         tw32(RCVBDI_STD_THRESH, val);
7301
7302         /* Initialize TG3_BDINFO's at:
7303          *  RCVDBDI_STD_BD:     standard eth size rx ring
7304          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7305          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7306          *
7307          * like so:
7308          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7309          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7310          *                              ring attribute flags
7311          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7312          *
7313          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7314          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7315          *
7316          * The size of each ring is fixed in the firmware, but the location is
7317          * configurable.
7318          */
7319         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7320              ((u64) tp->rx_std_mapping >> 32));
7321         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7322              ((u64) tp->rx_std_mapping & 0xffffffff));
7323         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7324              NIC_SRAM_RX_BUFFER_DESC);
7325
7326         /* Don't even try to program the JUMBO/MINI buffer descriptor
7327          * configs on 5705.
7328          */
7329         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7330                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7331                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7332         } else {
7333                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7334                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7335
7336                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7337                      BDINFO_FLAGS_DISABLED);
7338
7339                 /* Setup replenish threshold. */
7340                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7341
7342                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7343                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7344                              ((u64) tp->rx_jumbo_mapping >> 32));
7345                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7346                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7347                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7348                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7349                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7350                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7351                 } else {
7352                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7353                              BDINFO_FLAGS_DISABLED);
7354                 }
7355
7356         }
7357
7358         /* There is only one send ring on 5705/5750, no need to explicitly
7359          * disable the others.
7360          */
7361         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7362                 /* Clear out send RCB ring in SRAM. */
7363                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7364                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7365                                       BDINFO_FLAGS_DISABLED);
7366         }
7367
7368         tp->tx_prod = 0;
7369         tp->tx_cons = 0;
7370         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7371         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7372
7373         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7374                        tp->tx_desc_mapping,
7375                        (TG3_TX_RING_SIZE <<
7376                         BDINFO_FLAGS_MAXLEN_SHIFT),
7377                        NIC_SRAM_TX_BUFFER_DESC);
7378
7379         /* There is only one receive return ring on 5705/5750, no need
7380          * to explicitly disable the others.
7381          */
7382         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7383                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7384                      i += TG3_BDINFO_SIZE) {
7385                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7386                                       BDINFO_FLAGS_DISABLED);
7387                 }
7388         }
7389
7390         tp->rx_rcb_ptr = 0;
7391         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7392
7393         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7394                        tp->rx_rcb_mapping,
7395                        (TG3_RX_RCB_RING_SIZE(tp) <<
7396                         BDINFO_FLAGS_MAXLEN_SHIFT),
7397                        0);
7398
7399         tp->rx_std_ptr = tp->rx_pending;
7400         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7401                      tp->rx_std_ptr);
7402
7403         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7404                                                 tp->rx_jumbo_pending : 0;
7405         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7406                      tp->rx_jumbo_ptr);
7407
7408         /* Initialize MAC address and backoff seed. */
7409         __tg3_set_mac_addr(tp, 0);
7410
7411         /* MTU + ethernet header + FCS + optional VLAN tag */
7412         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7413
7414         /* The slot time is changed by tg3_setup_phy if we
7415          * run at gigabit with half duplex.
7416          */
7417         tw32(MAC_TX_LENGTHS,
7418              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7419              (6 << TX_LENGTHS_IPG_SHIFT) |
7420              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7421
7422         /* Receive rules. */
7423         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7424         tw32(RCVLPC_CONFIG, 0x0181);
7425
7426         /* Calculate RDMAC_MODE setting early, we need it to determine
7427          * the RCVLPC_STATE_ENABLE mask.
7428          */
7429         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7430                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7431                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7432                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7433                       RDMAC_MODE_LNGREAD_ENAB);
7434
7435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7436             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7437                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7438                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7439                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7440
7441         /* If statement applies to 5705 and 5750 PCI devices only */
7442         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7443              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7444             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7445                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7446                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7447                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7448                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7449                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7450                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7451                 }
7452         }
7453
7454         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7455                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7456
7457         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7458                 rdmac_mode |= (1 << 27);
7459
7460         /* Receive/send statistics. */
7461         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7462                 val = tr32(RCVLPC_STATS_ENABLE);
7463                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7464                 tw32(RCVLPC_STATS_ENABLE, val);
7465         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7466                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7467                 val = tr32(RCVLPC_STATS_ENABLE);
7468                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7469                 tw32(RCVLPC_STATS_ENABLE, val);
7470         } else {
7471                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7472         }
7473         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7474         tw32(SNDDATAI_STATSENAB, 0xffffff);
7475         tw32(SNDDATAI_STATSCTRL,
7476              (SNDDATAI_SCTRL_ENABLE |
7477               SNDDATAI_SCTRL_FASTUPD));
7478
7479         /* Setup host coalescing engine. */
7480         tw32(HOSTCC_MODE, 0);
7481         for (i = 0; i < 2000; i++) {
7482                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7483                         break;
7484                 udelay(10);
7485         }
7486
7487         __tg3_set_coalesce(tp, &tp->coal);
7488
7489         /* set status block DMA address */
7490         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7491              ((u64) tp->status_mapping >> 32));
7492         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7493              ((u64) tp->status_mapping & 0xffffffff));
7494
7495         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7496                 /* Status/statistics block address.  See tg3_timer,
7497                  * the tg3_periodic_fetch_stats call there, and
7498                  * tg3_get_stats to see how this works for 5705/5750 chips.
7499                  */
7500                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7501                      ((u64) tp->stats_mapping >> 32));
7502                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7503                      ((u64) tp->stats_mapping & 0xffffffff));
7504                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7505                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7506         }
7507
7508         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7509
7510         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7511         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7512         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7513                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7514
7515         /* Clear statistics/status block in chip, and status block in ram. */
7516         for (i = NIC_SRAM_STATS_BLK;
7517              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7518              i += sizeof(u32)) {
7519                 tg3_write_mem(tp, i, 0);
7520                 udelay(40);
7521         }
7522         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7523
7524         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7525                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7526                 /* reset to prevent losing 1st rx packet intermittently */
7527                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7528                 udelay(10);
7529         }
7530
7531         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7532                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7533         else
7534                 tp->mac_mode = 0;
7535         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7536                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7537         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7538             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7539             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7540                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7541         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7542         udelay(40);
7543
7544         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7545          * If TG3_FLG2_IS_NIC is zero, we should read the
7546          * register to preserve the GPIO settings for LOMs. The GPIOs,
7547          * whether used as inputs or outputs, are set by boot code after
7548          * reset.
7549          */
7550         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7551                 u32 gpio_mask;
7552
7553                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7554                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7555                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7556
7557                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7558                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7559                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7560
7561                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7562                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7563
7564                 tp->grc_local_ctrl &= ~gpio_mask;
7565                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7566
7567                 /* GPIO1 must be driven high for eeprom write protect */
7568                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7569                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7570                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7571         }
7572         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7573         udelay(100);
7574
7575         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7576         tp->last_tag = 0;
7577
7578         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7579                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7580                 udelay(40);
7581         }
7582
7583         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7584                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7585                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7586                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7587                WDMAC_MODE_LNGREAD_ENAB);
7588
7589         /* If statement applies to 5705 and 5750 PCI devices only */
7590         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7591              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7593                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7594                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7595                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7596                         /* nothing */
7597                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7598                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7599                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7600                         val |= WDMAC_MODE_RX_ACCEL;
7601                 }
7602         }
7603
7604         /* Enable host coalescing bug fix */
7605         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7606             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7607             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7608             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7609             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7610                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7611
7612         tw32_f(WDMAC_MODE, val);
7613         udelay(40);
7614
7615         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7616                 u16 pcix_cmd;
7617
7618                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7619                                      &pcix_cmd);
7620                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7621                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7622                         pcix_cmd |= PCI_X_CMD_READ_2K;
7623                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7624                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7625                         pcix_cmd |= PCI_X_CMD_READ_2K;
7626                 }
7627                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7628                                       pcix_cmd);
7629         }
7630
7631         tw32_f(RDMAC_MODE, rdmac_mode);
7632         udelay(40);
7633
7634         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7635         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7636                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7637
7638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7639                 tw32(SNDDATAC_MODE,
7640                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7641         else
7642                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7643
7644         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7645         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7646         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7647         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7648         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7649                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7650         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7651         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7652
7653         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7654                 err = tg3_load_5701_a0_firmware_fix(tp);
7655                 if (err)
7656                         return err;
7657         }
7658
7659         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7660                 err = tg3_load_tso_firmware(tp);
7661                 if (err)
7662                         return err;
7663         }
7664
7665         tp->tx_mode = TX_MODE_ENABLE;
7666         tw32_f(MAC_TX_MODE, tp->tx_mode);
7667         udelay(100);
7668
7669         tp->rx_mode = RX_MODE_ENABLE;
7670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7674                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7675
7676         tw32_f(MAC_RX_MODE, tp->rx_mode);
7677         udelay(10);
7678
7679         tw32(MAC_LED_CTRL, tp->led_ctrl);
7680
7681         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7682         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7683                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7684                 udelay(10);
7685         }
7686         tw32_f(MAC_RX_MODE, tp->rx_mode);
7687         udelay(10);
7688
7689         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7690                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7691                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7692                         /* Set drive transmission level to 1.2V  */
7693                         /* only if the signal pre-emphasis bit is not set  */
7694                         val = tr32(MAC_SERDES_CFG);
7695                         val &= 0xfffff000;
7696                         val |= 0x880;
7697                         tw32(MAC_SERDES_CFG, val);
7698                 }
7699                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7700                         tw32(MAC_SERDES_CFG, 0x616000);
7701         }
7702
7703         /* Prevent chip from dropping frames when flow control
7704          * is enabled.
7705          */
7706         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7707
7708         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7709             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7710                 /* Use hardware link auto-negotiation */
7711                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7712         }
7713
7714         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7715             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7716                 u32 tmp;
7717
7718                 tmp = tr32(SERDES_RX_CTRL);
7719                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7720                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7721                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7722                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7723         }
7724
7725         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7726                 if (tp->link_config.phy_is_low_power) {
7727                         tp->link_config.phy_is_low_power = 0;
7728                         tp->link_config.speed = tp->link_config.orig_speed;
7729                         tp->link_config.duplex = tp->link_config.orig_duplex;
7730                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7731                 }
7732
7733                 err = tg3_setup_phy(tp, 0);
7734                 if (err)
7735                         return err;
7736
7737                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7738                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7739                         u32 tmp;
7740
7741                         /* Clear CRC stats. */
7742                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7743                                 tg3_writephy(tp, MII_TG3_TEST1,
7744                                              tmp | MII_TG3_TEST1_CRC_EN);
7745                                 tg3_readphy(tp, 0x14, &tmp);
7746                         }
7747                 }
7748         }
7749
7750         __tg3_set_rx_mode(tp->dev);
7751
7752         /* Initialize receive rules. */
7753         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7754         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7755         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7756         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7757
7758         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7759             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7760                 limit = 8;
7761         else
7762                 limit = 16;
7763         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7764                 limit -= 4;
7765         switch (limit) {
7766         case 16:
7767                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7768         case 15:
7769                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7770         case 14:
7771                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7772         case 13:
7773                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7774         case 12:
7775                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7776         case 11:
7777                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7778         case 10:
7779                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7780         case 9:
7781                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7782         case 8:
7783                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7784         case 7:
7785                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7786         case 6:
7787                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7788         case 5:
7789                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7790         case 4:
7791                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7792         case 3:
7793                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7794         case 2:
7795         case 1:
7796
7797         default:
7798                 break;
7799         }
7800
7801         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7802                 /* Write our heartbeat update interval to APE. */
7803                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7804                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7805
7806         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7807
7808         return 0;
7809 }
7810
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success, or the negative error code propagated
 * from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Put the core clocks in a known state before programming any
	 * other chip registers (must precede the hardware reset).
	 */
	tg3_switch_clocks(tp);

	/* Point the PCI memory window at the start of NIC SRAM. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Full hardware (re)initialization.  NOTE(review): a nonzero
	 * reset_phy presumably also resets the PHY — confirm against
	 * tg3_reset_hw()'s definition.
	 */
	return tg3_reset_hw(tp, reset_phy);
}
7822
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit statistics slot PSTAT, which is kept as separate ->low and
 * ->high 32-bit halves.  If the addition to ->low wraps around (the
 * new ->low is smaller than the value just added), propagate a carry
 * into ->high.  Wrapped in do { } while (0) so it expands safely as
 * a single statement.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7829
7830 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7831 {
7832         struct tg3_hw_stats *sp = tp->hw_stats;
7833
7834         if (!netif_carrier_ok(tp->dev))
7835                 return;
7836
7837         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7838         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7839         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7840         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7841         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7842         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7843         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7844         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7845         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7846         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7847         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7848         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7849         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7850
7851         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7852         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7853         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7854         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7855         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7856         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7857         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7858         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7859         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7860         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7861         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7862         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7863         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7864         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7865
7866         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7867         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7868         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7869 }
7870
/* Driver watchdog timer; re-arms itself every tp->timer_offset jiffies
 * while the interface is up.  Performs a per-tick workaround for the
 * non-tagged-status IRQ race, once-per-second link polling and stats
 * collection, and the ASF firmware heartbeat every two seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupts are being synchronized (e.g. a reset is in
         * progress); skip this tick entirely and just re-arm.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but perhaps not yet
                         * serviced: force a self-interrupt so the ISR
                         * runs again.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Otherwise nudge host coalescing to emit a
                         * fresh status block update now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine stopped unexpectedly: the
                         * chip is wedged.  Schedule a full reset from
                         * process context; reset_task re-arms the
                         * timer, so do not re-arm here.
                         */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link state changed while we believed the link
                         * was up.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        /* Carrier is off but PCS sync / signal detect
                         * suggests the link may have come back.
                         */
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Briefly clear the port mode
                                         * bits to bounce the MAC before
                                         * re-running link setup.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
                    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
7990
7991 static int tg3_request_irq(struct tg3 *tp)
7992 {
7993         irq_handler_t fn;
7994         unsigned long flags;
7995         struct net_device *dev = tp->dev;
7996
7997         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7998                 fn = tg3_msi;
7999                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8000                         fn = tg3_msi_1shot;
8001                 flags = IRQF_SAMPLE_RANDOM;
8002         } else {
8003                 fn = tg3_interrupt;
8004                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8005                         fn = tg3_interrupt_tagged;
8006                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8007         }
8008         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8009 }
8010
/* Verify that the chip can actually deliver an interrupt to the CPU.
 * Temporarily swaps in tg3_test_isr, forces a host-coalescing status
 * update, and polls for up to ~50ms for evidence that the interrupt
 * fired.  Restores the normal handler before returning.
 * Returns 0 if an interrupt was seen, -EIO if not, or another negative
 * errno on setup failure.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap the regular handler for the test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force the chip to generate a status block update (and thus
         * an interrupt) right now.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* Either the ISR cleared the interrupt mailbox to a
                 * non-zero value, or the PCI interrupt is now masked:
                 * the interrupt was delivered.
                 */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Restore the normal interrupt handler. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
8064
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other negative errno means the device is
 * left in an unusable state and the caller must tear it down.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test if we are not using MSI. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Re-request the IRQ; with USING_MSI cleared this installs the
         * INTx handler.
         */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
8125
/* net_device ->open() hook: powers the chip to D0, allocates the DMA
 * rings, sets up the interrupt (preferring MSI where supported),
 * initializes the hardware, verifies that MSI delivery really works,
 * then starts the driver timer and the TX queue.  On any failure the
 * steps completed so far are unwound before returning the error.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo MSI enable and ring allocation. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status permits a 1 Hz timer; the non-tagged
                 * race workaround in tg3_timer() needs 10 Hz.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                /* timer_multiplier = ticks per second; the ASF
                 * heartbeat fires every two seconds (see tg3_timer()).
                 */
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Confirm the MSI actually reaches the CPU;
                 * tg3_test_msi() falls back to INTx itself when it can.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_phy_start(tp);

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
8261
#if 0
/* Debug-only state dump (compiled out).  Dumps PCI status, the MODE and
 * STATUS registers of every major chip block, host coalescing block
 * addresses, BDINFO structures, on-chip RCBs and status block, the
 * host-side status/statistics blocks, the producer/consumer mailboxes,
 * and the first few NIC-side TX/RX buffer descriptors via printk.
 *
 * Fix relative to the old version: the descriptor loops used
 * `unsigned long txd/rxd = tp->regs + ...` and passed the integer to
 * readl(), relying on implicit pointer<->integer conversions that are
 * not valid C.  They now use `void __iomem *` cursors, the kernel's
 * idiom for MMIO addresses (void-pointer arithmetic is a supported GCC
 * extension used throughout the kernel).
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        /* On-chip send and receive-return ring control blocks. */
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                void __iomem *txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                void __iomem *rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                void __iomem *rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
8489
8490 static struct net_device_stats *tg3_get_stats(struct net_device *);
8491 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8492
/* net_device ->stop() hook: tears the device down in roughly the
 * reverse order of tg3_open().  Quiesces NAPI and the reset worker,
 * stops the timer, halts the chip, releases the IRQ, snapshots the
 * statistics counters, frees DMA memory, and powers down to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        /* Ensure no reset_task runs concurrently with (or after) this
         * teardown.
         */
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Save the accumulated totals so the counters stay continuous
         * across a close/open cycle (the hardware stats block is about
         * to be freed).
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
8536
8537 static inline unsigned long get_stat64(tg3_stat64_t *val)
8538 {
8539         unsigned long ret;
8540
8541 #if (BITS_PER_LONG == 32)
8542         ret = val->low;
8543 #else
8544         ret = ((u64)val->high << 32) | ((u64)val->low);
8545 #endif
8546         return ret;
8547 }
8548
8549 static inline u64 get_estat64(tg3_stat64_t *val)
8550 {
8551        return ((u64)val->high << 32) | ((u64)val->low);
8552 }
8553
/* Return the accumulated RX CRC error count.  On 5700/5701 with a
 * non-SerDes PHY the count is read from a PHY register and accumulated
 * in software (tp->phy_crc_errors); on all other chips the MAC's
 * rx_fcs_errors hardware statistic is used directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Enable the PHY's CRC counter, then read it.
                         * Register 0x14 is presumably the PHY CRC
                         * counter -- NOTE(review): confirm against the
                         * Broadcom PHY register map and give it a
                         * symbolic name.
                         */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
8579
/* Accumulate one ethtool statistics member: the running total is the
 * snapshot taken at last close (old_estats) plus the current hardware
 * counter.  Relies on estats/old_estats/hw_stats locals in the caller;
 * used only by tg3_get_estats() below.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_estat64(&hw_stats->member)
8583
/* Fill and return the driver's ethtool statistics block.  Each member
 * is the total saved at last close (estats_prev) plus the live hardware
 * counter, so values remain continuous across close/open cycles.
 * Returns the previous snapshot unchanged if the hardware statistics
 * block has not been allocated (device not opened yet).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        /* Receive path counters. */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit path counters. */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Receive-side DMA and ring housekeeping counters. */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        /* Transmit-side DMA counters. */
        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Interrupt and ring-update counters. */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
8671
/* ethtool/netdev get_stats(): build struct net_device_stats from the
 * chip's DMA'd hardware statistics block.
 *
 * The hardware counters are cleared by a chip reset, so every field is
 * accumulated on top of tp->net_stats_prev, the snapshot taken before
 * the last reset.  If the statistics block has not been mapped yet the
 * snapshot is returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast, multicast and
	 * broadcast hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors are not read from the stats block; they come from
	 * calc_crc_errors() (defined elsewhere in this file).
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8731
8732 static inline u32 calc_crc(unsigned char *buf, int len)
8733 {
8734         u32 reg;
8735         u32 tmp;
8736         int j, k;
8737
8738         reg = 0xffffffff;
8739
8740         for (j = 0; j < len; j++) {
8741                 reg ^= buf[j];
8742
8743                 for (k = 0; k < 8; k++) {
8744                         tmp = reg & 0x01;
8745
8746                         reg >>= 1;
8747
8748                         if (tmp) {
8749                                 reg ^= 0xedb88320;
8750                         }
8751                 }
8752         }
8753
8754         return ~reg;
8755 }
8756
8757 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8758 {
8759         /* accept or reject all multicast frames */
8760         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8761         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8762         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8763         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8764 }
8765
/* Recompute and program the RX filtering state (promiscuous flag,
 * VLAN tag stripping, multicast hash) from dev->flags and the device's
 * multicast list.  Caller must hold the full lock.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The hash index is the low 7 bits of the
			 * inverted CRC-32 of the address: bits 5-6
			 * select one of the four hash registers, bits
			 * 0-4 select the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when something actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8829
/* netdev set_rx_mode hook: apply the RX filter under the full lock.
 * Nothing to do while the interface is down; bring-up programs the RX
 * mode itself.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8841
8842 #define TG3_REGDUMP_LEN         (32 * 1024)
8843
/* ethtool get_regs_len(): size of the buffer tg3_get_regs() fills -- a
 * fixed 32KB window (zero-filled gaps included).
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8848
/* ethtool get_regs(): dump a 32KB window of chip registers into _p.
 *
 * Each register value is stored at the byte offset of the register
 * itself, so unread regions stay zero.  The chip is not touched while
 * the PHY is in low power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the output cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at 'base' and read 'len' bytes of registers. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Position the cursor at 'reg' and read that single register. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	/* One entry per readable functional block of the chip. */
	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist on parts with NVRAM attached. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8921
8922 static int tg3_get_eeprom_len(struct net_device *dev)
8923 {
8924         struct tg3 *tp = netdev_priv(dev);
8925
8926         return tp->nvram_size;
8927 }
8928
8929 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8930 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8931 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8932
/* ethtool get_eeprom(): copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into 'data'.
 *
 * NVRAM is readable only as 32-bit words on 4-byte boundaries, so the
 * request is split into an unaligned head, a run of whole words and an
 * unaligned tail.  eeprom->len is advanced as bytes are copied so the
 * caller can see partial progress when a read fails (-errno returned).
 * Refused while the PHY is powered down.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8992
8993 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8994
/* ethtool set_eeprom(): write eeprom->len bytes from 'data' to NVRAM
 * at eeprom->offset.
 *
 * NVRAM writes must be whole 32-bit words on 4-byte boundaries.  For
 * an unaligned request the bordering word(s) are read back first and
 * merged with the user data in a temporary buffer, preserving the
 * bytes outside the requested range.  Refused while the PHY is powered
 * down or when the magic cookie does not match.  Returns 0 or -errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved border word(s) with the user data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
9052
/* ethtool get_settings(): report link capabilities and current state.
 *
 * With phylib attached (TG3_FLG3_USE_PHYLIB) the query is forwarded to
 * the PHY layer.  Otherwise the answer is built from the driver's own
 * link_config: copper parts report TP with 10/100 (plus 1000 unless
 * the chip is 10/100-only); SerDes parts report FIBRE.  Speed/duplex
 * are filled in only while the interface is running.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
9093
/* ethtool set_settings(): validate and apply a new link configuration.
 *
 * With phylib attached the request is forwarded to the PHY layer.
 * Otherwise the combination is sanity-checked -- SerDes may only
 * advertise 1000 half/full and can only force 1000; copper cannot
 * force 1000; 10/100-only chips cannot do 1000 at all -- before
 * link_config is updated and, if the interface is running, the link is
 * renegotiated via tg3_setup_phy().
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: speed/duplex are negotiated, not forced. */
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested settings so they can be restored after
	 * a low-power transition.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9149
9150 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9151 {
9152         struct tg3 *tp = netdev_priv(dev);
9153
9154         strcpy(info->driver, DRV_MODULE_NAME);
9155         strcpy(info->version, DRV_MODULE_VERSION);
9156         strcpy(info->fw_version, tp->fw_ver);
9157         strcpy(info->bus_info, pci_name(tp->pdev));
9158 }
9159
9160 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9161 {
9162         struct tg3 *tp = netdev_priv(dev);
9163
9164         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9165             device_can_wakeup(&tp->pdev->dev))
9166                 wol->supported = WAKE_MAGIC;
9167         else
9168                 wol->supported = 0;
9169         wol->wolopts = 0;
9170         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9171             device_can_wakeup(&tp->pdev->dev))
9172                 wol->wolopts = WAKE_MAGIC;
9173         memset(&wol->sopass, 0, sizeof(wol->sopass));
9174 }
9175
9176 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9177 {
9178         struct tg3 *tp = netdev_priv(dev);
9179         struct device *dp = &tp->pdev->dev;
9180
9181         if (wol->wolopts & ~WAKE_MAGIC)
9182                 return -EINVAL;
9183         if ((wol->wolopts & WAKE_MAGIC) &&
9184             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9185                 return -EINVAL;
9186
9187         spin_lock_bh(&tp->lock);
9188         if (wol->wolopts & WAKE_MAGIC) {
9189                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9190                 device_set_wakeup_enable(dp, true);
9191         } else {
9192                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9193                 device_set_wakeup_enable(dp, false);
9194         }
9195         spin_unlock_bh(&tp->lock);
9196
9197         return 0;
9198 }
9199
9200 static u32 tg3_get_msglevel(struct net_device *dev)
9201 {
9202         struct tg3 *tp = netdev_priv(dev);
9203         return tp->msg_enable;
9204 }
9205
9206 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9207 {
9208         struct tg3 *tp = netdev_priv(dev);
9209         tp->msg_enable = value;
9210 }
9211
/* ethtool set_tso(): enable/disable TCP segmentation offload.
 *
 * Chips without TSO support accept only 'off'.  On HW_TSO_2-capable
 * parts other than the 5906, IPv6 TSO is toggled together with plain
 * TSO, and the 5761/5785/non-AX-5784 additionally get TSO_ECN.  The
 * generic ethtool helper performs the final NETIF_F_TSO update.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9235
/* ethtool nway_reset(): restart link autonegotiation.
 *
 * Not possible while the interface is down, and never applicable to
 * SerDes PHYs.  With phylib attached the request is delegated to the
 * PHY layer; otherwise BMCR is poked directly, which only makes sense
 * while autoneg is enabled (or parallel detection is active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice on purpose, it seems --
		 * presumably the first read flushes a stale latched value.
		 * Confirm before "simplifying" this away.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9269
9270 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9271 {
9272         struct tg3 *tp = netdev_priv(dev);
9273
9274         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9275         ering->rx_mini_max_pending = 0;
9276         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9277                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9278         else
9279                 ering->rx_jumbo_max_pending = 0;
9280
9281         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9282
9283         ering->rx_pending = tp->rx_pending;
9284         ering->rx_mini_pending = 0;
9285         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9286                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9287         else
9288                 ering->rx_jumbo_pending = 0;
9289
9290         ering->tx_pending = tp->tx_pending;
9291 }
9292
/* ethtool set_ringparam(): resize the RX/TX rings.
 *
 * Bounds-checks the request (TX must exceed MAX_SKB_FRAGS, and three
 * times that on TSO_BUG chips), then stops traffic, records the new
 * sizes and resets/restarts the hardware if the interface was running.
 * Returns 0 or -errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* MAX_RXPEND_64 chips cannot use more than 63 pending RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9336
9337 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9338 {
9339         struct tg3 *tp = netdev_priv(dev);
9340
9341         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9342
9343         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9344                 epause->rx_pause = 1;
9345         else
9346                 epause->rx_pause = 0;
9347
9348         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9349                 epause->tx_pause = 1;
9350         else
9351                 epause->tx_pause = 0;
9352 }
9353
/* ethtool set_pauseparam(): configure RX/TX flow control.
 *
 * phylib path: with autoneg requested, the rx/tx pause pair is mapped
 * onto the standard Pause/Asym_Pause advertisement bits; if the PHY is
 * connected, autonegotiation restarts only when the advertisement
 * actually changed.  Without autoneg, the flowctrl bits are forced and
 * the MAC is reprogrammed directly.
 *
 * Legacy path: take the full lock (stopping the netif if running),
 * update the PAUSE_AUTONEG flag and flowctrl bits, then reset and
 * restart the hardware so they take effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto the 802.3
			 * Pause/Asym_Pause advertisement encoding.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9446
9447 static u32 tg3_get_rx_csum(struct net_device *dev)
9448 {
9449         struct tg3 *tp = netdev_priv(dev);
9450         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9451 }
9452
9453 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9454 {
9455         struct tg3 *tp = netdev_priv(dev);
9456
9457         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9458                 if (data != 0)
9459                         return -EINVAL;
9460                 return 0;
9461         }
9462
9463         spin_lock_bh(&tp->lock);
9464         if (data)
9465                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9466         else
9467                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9468         spin_unlock_bh(&tp->lock);
9469
9470         return 0;
9471 }
9472
9473 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9474 {
9475         struct tg3 *tp = netdev_priv(dev);
9476
9477         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9478                 if (data != 0)
9479                         return -EINVAL;
9480                 return 0;
9481         }
9482
9483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9487             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9488                 ethtool_op_set_tx_ipv6_csum(dev, data);
9489         else
9490                 ethtool_op_set_tx_csum(dev, data);
9491
9492         return 0;
9493 }
9494
9495 static int tg3_get_sset_count (struct net_device *dev, int sset)
9496 {
9497         switch (sset) {
9498         case ETH_SS_TEST:
9499                 return TG3_NUM_TEST;
9500         case ETH_SS_STATS:
9501                 return TG3_NUM_STATS;
9502         default:
9503                 return -EOPNOTSUPP;
9504         }
9505 }
9506
9507 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9508 {
9509         switch (stringset) {
9510         case ETH_SS_STATS:
9511                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9512                 break;
9513         case ETH_SS_TEST:
9514                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9515                 break;
9516         default:
9517                 WARN_ON(1);     /* we need a WARN() */
9518                 break;
9519         }
9520 }
9521
/* ethtool phys_id hook: blink the port LEDs for @data seconds so an
 * operator can physically locate the adapter.  data == 0 means "blink
 * until interrupted", approximated here by UINT_MAX / 2 seconds.
 * Returns -EAGAIN if the interface is down, otherwise 0.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	/* Two half-second phases per second: even iterations force all
	 * LEDs on (with traffic-blink override), odd iterations force
	 * them off via the link-LED override alone.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		/* Non-zero return means a signal arrived; stop blinking. */
		if (msleep_interruptible(500))
			break;
	}
	/* Restore the normal LED configuration. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
9553
/* ethtool get_ethtool_stats hook: refresh the driver's estats block via
 * tg3_get_estats() and copy it into the caller-supplied u64 array (sized
 * by the ETH_SS_STATS count reported in tg3_get_sset_count()).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9560
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* NVRAM self-test: read the image and verify its integrity.  Three
 * layouts are recognized from the magic word at offset 0:
 *   - standard EEPROM image: two calc_crc()-protected regions;
 *   - firmware selfboot format 1: simple 8-bit byte sum must be zero;
 *   - hardware selfboot format: per-byte parity bits.
 * Returns 0 on success or on an unrecognized-but-tolerated selfboot
 * revision, -EIO on read failure or checksum mismatch, -ENOMEM if the
 * temporary buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Format 1 selfboot images come in several
			 * revisions with different total sizes.
			 */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can check. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image, one little-endian word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums (mod 256) to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits
				 * (MSB first) for subsequent data bytes.
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits and byte 17
				 * another 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must contain an odd
		 * number of set bits (odd parity) or the image is bad.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Standard EEPROM image: verify the two CRC-protected regions. */
	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9701
9702 #define TG3_SERDES_TIMEOUT_SEC  2
9703 #define TG3_COPPER_TIMEOUT_SEC  6
9704
9705 static int tg3_test_link(struct tg3 *tp)
9706 {
9707         int i, max;
9708
9709         if (!netif_running(tp->dev))
9710                 return -ENODEV;
9711
9712         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9713                 max = TG3_SERDES_TIMEOUT_SEC;
9714         else
9715                 max = TG3_COPPER_TIMEOUT_SEC;
9716
9717         for (i = 0; i < max; i++) {
9718                 if (netif_carrier_ok(tp->dev))
9719                         return 0;
9720
9721                 if (msleep_interruptible(1000))
9722                         break;
9723         }
9724
9725         return -EIO;
9726 }
9727
/* Only test the commonly used registers */
/* Register self-test: for each table entry, save the register, write
 * all-zeros and then all-ones, and check that read-only bits keep their
 * saved value while read/write bits take the written value.  The table
 * flags select which entries apply to which chip family.  Returns 0 on
 * success, -EIO (after restoring the register) on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving to the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9948
9949 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9950 {
9951         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9952         int i;
9953         u32 j;
9954
9955         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9956                 for (j = 0; j < len; j += 4) {
9957                         u32 val;
9958
9959                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9960                         tg3_read_mem(tp, offset + j, &val);
9961                         if (val != test_pattern[i])
9962                                 return -EIO;
9963                 }
9964         }
9965         return 0;
9966 }
9967
9968 static int tg3_test_memory(struct tg3 *tp)
9969 {
9970         static struct mem_entry {
9971                 u32 offset;
9972                 u32 len;
9973         } mem_tbl_570x[] = {
9974                 { 0x00000000, 0x00b50},
9975                 { 0x00002000, 0x1c000},
9976                 { 0xffffffff, 0x00000}
9977         }, mem_tbl_5705[] = {
9978                 { 0x00000100, 0x0000c},
9979                 { 0x00000200, 0x00008},
9980                 { 0x00004000, 0x00800},
9981                 { 0x00006000, 0x01000},
9982                 { 0x00008000, 0x02000},
9983                 { 0x00010000, 0x0e000},
9984                 { 0xffffffff, 0x00000}
9985         }, mem_tbl_5755[] = {
9986                 { 0x00000200, 0x00008},
9987                 { 0x00004000, 0x00800},
9988                 { 0x00006000, 0x00800},
9989                 { 0x00008000, 0x02000},
9990                 { 0x00010000, 0x0c000},
9991                 { 0xffffffff, 0x00000}
9992         }, mem_tbl_5906[] = {
9993                 { 0x00000200, 0x00008},
9994                 { 0x00004000, 0x00400},
9995                 { 0x00006000, 0x00400},
9996                 { 0x00008000, 0x01000},
9997                 { 0x00010000, 0x01000},
9998                 { 0xffffffff, 0x00000}
9999         };
10000         struct mem_entry *mem_tbl;
10001         int err = 0;
10002         int i;
10003
10004         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10006                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10007                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10008                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10009                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10010                         mem_tbl = mem_tbl_5755;
10011                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10012                         mem_tbl = mem_tbl_5906;
10013                 else
10014                         mem_tbl = mem_tbl_5705;
10015         } else
10016                 mem_tbl = mem_tbl_570x;
10017
10018         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10019                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10020                     mem_tbl[i].len)) != 0)
10021                         break;
10022         }
10023
10024         return err;
10025 }
10026
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Run one loopback iteration: configure either internal MAC loopback or
 * PHY loopback, transmit a single 1514-byte test frame, and verify that
 * exactly one frame comes back on the standard RX ring with the same
 * length and payload.  Returns 0 on success, -EINVAL for an unknown
 * mode, -ENOMEM if the skb cannot be allocated, -EIO on any mismatch.
 * Caller is expected to have reset the hardware first (see
 * tg3_test_loopback()).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: clear a shadow-register bit (0x1b bit 5)
			 * before entering loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, zeroed next 8 bytes,
	 * then an incrementing byte pattern for the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	/* NOTE(review): the mapping is not checked for failure here
	 * (pci_dma_mapping_error); worth confirming against later
	 * upstream fixes.
	 */
	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer is so we can detect our frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been both fully transmitted and received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: must be on the standard ring,
	 * error-free (odd-nibble is tolerated), and the same length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload against the pattern we sent. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
10197
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
					 TG3_PHY_LOOPBACK_FAILED)

/* Loopback self-test: reset the hardware, then run MAC loopback and
 * (when applicable) PHY loopback.  Returns a bitmask of the TG3_*
 * _LOOPBACK_FAILED flags; 0 means both passed.  On 5784/5761/5785 the
 * CPMU mutex must be held and link-based power management disabled
 * while the MAC loopback runs.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* Restore power management and release the CPMU mutex. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* Skip PHY loopback on serdes links and when phylib drives the PHY. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
10261
/* ethtool self-test handler.
 *
 * Always runs the NVRAM (data[0]) and link (data[1]) tests.  When
 * ETH_TEST_FL_OFFLINE is requested, it additionally halts the chip
 * and runs the register (data[2]), memory (data[3]), loopback
 * (data[4]) and interrupt (data[5]) tests, then restores the
 * hardware.  A nonzero data[] entry means that test failed, and
 * ETH_TEST_FL_FAILED is set in etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip if it was put into a low-power state. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before taking the full lock
		 * with irq_sync so in-flight NAPI/irq work finishes.
		 */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the MAC and the on-chip RX (and, on pre-5705
		 * chips, TX) CPUs.  The NVRAM lock is held across the
		 * CPU halts (NOTE(review): presumably so a halted CPU
		 * cannot be left holding NVRAM arbitration — confirm).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test runs without the full lock held. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Final reset, then bring the hardware back up if the
		 * interface was running when we started.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Return to low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10339
/* net_device ioctl handler for MII PHY access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG).  When phylib manages the PHY the
 * request is forwarded to it; otherwise the PHY registers are
 * accessed directly under tp->lock.  Returns 0 or a negative errno;
 * -EOPNOTSUPP for unsupported commands and for serdes devices that
 * have no MII PHY.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* Not usable until the PHY has been connected. */
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
	}

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* PHY registers are not accessible in low-power mode. */
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writes require CAP_NET_ADMIN; reads do not. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
10397
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * chip's RX mode so VLAN tags are kept or stripped accordingly.  The
 * interface is stopped around the update and restarted afterwards.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
10419
10420 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10421 {
10422         struct tg3 *tp = netdev_priv(dev);
10423
10424         memcpy(ec, &tp->coal, sizeof(*ec));
10425         return 0;
10426 }
10427
10428 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10429 {
10430         struct tg3 *tp = netdev_priv(dev);
10431         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10432         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10433
10434         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10435                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10436                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10437                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10438                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10439         }
10440
10441         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10442             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10443             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10444             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10445             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10446             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10447             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10448             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10449             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10450             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10451                 return -EINVAL;
10452
10453         /* No rx interrupts will be generated if both are zero */
10454         if ((ec->rx_coalesce_usecs == 0) &&
10455             (ec->rx_max_coalesced_frames == 0))
10456                 return -EINVAL;
10457
10458         /* No tx interrupts will be generated if both are zero */
10459         if ((ec->tx_coalesce_usecs == 0) &&
10460             (ec->tx_max_coalesced_frames == 0))
10461                 return -EINVAL;
10462
10463         /* Only copy relevant parameters, ignore all others. */
10464         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10465         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10466         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10467         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10468         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10469         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10470         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10471         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10472         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10473
10474         if (netif_running(dev)) {
10475                 tg3_full_lock(tp, 0);
10476                 __tg3_set_coalesce(tp, &tp->coal);
10477                 tg3_full_unlock(tp);
10478         }
10479         return 0;
10480 }
10481
/* ethtool operations table, installed on the net_device. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10514
10515 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10516 {
10517         u32 cursize, val, magic;
10518
10519         tp->nvram_size = EEPROM_CHIP_SIZE;
10520
10521         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10522                 return;
10523
10524         if ((magic != TG3_EEPROM_MAGIC) &&
10525             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10526             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10527                 return;
10528
10529         /*
10530          * Size the chip by reading offsets at increasing powers of two.
10531          * When we encounter our validation signature, we know the addressing
10532          * has wrapped around, and thus have our chip size.
10533          */
10534         cursize = 0x10;
10535
10536         while (cursize < tp->nvram_size) {
10537                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10538                         return;
10539
10540                 if (val == magic)
10541                         break;
10542
10543                 cursize <<= 1;
10544         }
10545
10546         tp->nvram_size = cursize;
10547 }
10548
10549 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10550 {
10551         u32 val;
10552
10553         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10554                 return;
10555
10556         /* Selfboot format */
10557         if (val != TG3_EEPROM_MAGIC) {
10558                 tg3_get_eeprom_size(tp);
10559                 return;
10560         }
10561
10562         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10563                 if (val != 0) {
10564                         tp->nvram_size = (val >> 16) * 1024;
10565                         return;
10566                 }
10567         }
10568         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10569 }
10570
/* Decode NVRAM_CFG1 for 5750/5780-class chips: record the flash
 * vendor (JEDEC id), page size and buffered/flash flags in *tp.
 * Chips outside that class get safe buffered-Atmel defaults.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* Not a flash part: clear the compatibility-bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Map the vendor field to JEDEC id / page size / flags. */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
10623
/* Decode NVRAM_CFG1 for the 5752: vendor, buffered/flash flags, and
 * the page size (flash parts) or maximum chip size (EEPROM parts).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash part: the page size is encoded in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10684
/* Decode NVRAM_CFG1 for 5755-class chips: vendor, page size and the
 * usable NVRAM size.  TPM-protected parts (bit 27) report a reduced
 * usable size.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 :
						  TG3_NVRAM_SIZE_512KB);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 :
						  TG3_NVRAM_SIZE_256KB);
			else
				tp->nvram_size = (protect ? 0x1f200 :
						  TG3_NVRAM_SIZE_128KB);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_64KB :
						  TG3_NVRAM_SIZE_128KB);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_64KB :
						  TG3_NVRAM_SIZE_256KB);
			else
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_128KB :
						  TG3_NVRAM_SIZE_512KB);
			break;
	}
}
10740
/* Decode NVRAM_CFG1 for 5787-class chips (also used for 5784/5785
 * by tg3_nvram_init): vendor, page size and buffered/flash flags.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM part: clear the compatibility bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
10778
/* Decode NVRAM_CFG1 for the 5761: vendor, page size and size.  On
 * TPM-protected parts (bit 27) the size comes from the
 * NVRAM_ADDR_LOCKOUT register; otherwise it is derived from the
 * vendor id.  Atmel parts here need no page-address translation
 * (TG3_FLG3_NO_NVRAM_ADDR_TRANS).
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = TG3_NVRAM_SIZE_2MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = TG3_NVRAM_SIZE_1MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = TG3_NVRAM_SIZE_512KB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = TG3_NVRAM_SIZE_256KB;
				break;
		}
	}
}
10853
10854 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10855 {
10856         tp->nvram_jedecnum = JEDEC_ATMEL;
10857         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10858         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10859 }
10860
10861 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10862 static void __devinit tg3_nvram_init(struct tg3 *tp)
10863 {
10864         tw32_f(GRC_EEPROM_ADDR,
10865              (EEPROM_ADDR_FSM_RESET |
10866               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10867                EEPROM_ADDR_CLKPERD_SHIFT)));
10868
10869         msleep(1);
10870
10871         /* Enable seeprom accesses. */
10872         tw32_f(GRC_LOCAL_CTRL,
10873              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10874         udelay(100);
10875
10876         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10877             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10878                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10879
10880                 if (tg3_nvram_lock(tp)) {
10881                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10882                                "tg3_nvram_init failed.\n", tp->dev->name);
10883                         return;
10884                 }
10885                 tg3_enable_nvram_access(tp);
10886
10887                 tp->nvram_size = 0;
10888
10889                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10890                         tg3_get_5752_nvram_info(tp);
10891                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10892                         tg3_get_5755_nvram_info(tp);
10893                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10894                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10895                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10896                         tg3_get_5787_nvram_info(tp);
10897                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10898                         tg3_get_5761_nvram_info(tp);
10899                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10900                         tg3_get_5906_nvram_info(tp);
10901                 else
10902                         tg3_get_nvram_info(tp);
10903
10904                 if (tp->nvram_size == 0)
10905                         tg3_get_nvram_size(tp);
10906
10907                 tg3_disable_nvram_access(tp);
10908                 tg3_nvram_unlock(tp);
10909
10910         } else {
10911                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10912
10913                 tg3_get_eeprom_size(tp);
10914         }
10915 }
10916
/* Read one 32-bit word from the legacy SEEPROM via the GRC EEPROM
 * state machine (the path used when TG3_FLAG_NVRAM is not set).
 *
 * @offset: byte offset; must be 32-bit aligned and fit in
 *          EEPROM_ADDR_ADDR_MASK.
 * @val:    receives the word from GRC_EEPROM_DATA on success.
 *
 * Returns 0 on success, -EINVAL for a bad offset, or -EBUSY if the
 * read does not complete within ~1 second (1000 x 1 ms polls).
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the other GRC_EEPROM_ADDR bits while programming
	 * device id 0, the target address, and the READ/START bits.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10950
/* Max polls (10 usec apart) to wait for an NVRAM command to finish. */
#define NVRAM_CMD_TIMEOUT 10000
10952
10953 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10954 {
10955         int i;
10956
10957         tw32(NVRAM_CMD, nvram_cmd);
10958         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10959                 udelay(10);
10960                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10961                         udelay(10);
10962                         break;
10963                 }
10964         }
10965         if (i == NVRAM_CMD_TIMEOUT) {
10966                 return -EBUSY;
10967         }
10968         return 0;
10969 }
10970
10971 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10972 {
10973         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10974             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10975             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10976            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10977             (tp->nvram_jedecnum == JEDEC_ATMEL))
10978
10979                 addr = ((addr / tp->nvram_pagesize) <<
10980                         ATMEL_AT45DB0X1B_PAGE_POS) +
10981                        (addr % tp->nvram_pagesize);
10982
10983         return addr;
10984 }
10985
10986 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10987 {
10988         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10989             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10990             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10991            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10992             (tp->nvram_jedecnum == JEDEC_ATMEL))
10993
10994                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10995                         tp->nvram_pagesize) +
10996                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10997
10998         return addr;
10999 }
11000
/* Read one 32-bit word of NVRAM at byte offset @offset into *@val.
 *
 * Falls back to the legacy EEPROM path when the chip has no NVRAM
 * interface.  Otherwise the offset is translated to the flash part's
 * physical addressing, the NVRAM arbitration lock is taken, and a
 * single-word read command is executed.  The lock/enable and
 * disable/unlock pairs bracket the access on both success and
 * command-failure paths.
 *
 * Returns 0 on success, -EINVAL for an out-of-range offset, or the
 * error from locking / command execution.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* The controller presents the word byte-swapped; undo it here. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
11032
11033 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11034 {
11035         u32 v;
11036         int res = tg3_nvram_read(tp, offset, &v);
11037         if (!res)
11038                 *val = cpu_to_le32(v);
11039         return res;
11040 }
11041
11042 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11043 {
11044         int err;
11045         u32 tmp;
11046
11047         err = tg3_nvram_read(tp, offset, &tmp);
11048         *val = swab32(tmp);
11049         return err;
11050 }
11051
/* Write @len bytes from @buf to the legacy serial EEPROM, one 32-bit
 * word at a time, via the GRC EEPROM_ADDR/EEPROM_DATA register pair.
 * @offset and @len are assumed dword aligned by the caller.
 * Returns 0 on success, or -EBUSY if any word fails to complete
 * within ~1 second; remaining words are not written after a failure.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		/* memcpy avoids unaligned access on buf. */
		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* NOTE(review): writing COMPLETE back appears to clear the
		 * stale completion status before starting a new write —
		 * confirm against the register spec.
		 */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		/* Kick off the write of this word. */
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, 1 ms per check, up to ~1 s. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
11094
11095 /* offset and length are dword aligned */
11096 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11097                 u8 *buf)
11098 {
11099         int ret = 0;
11100         u32 pagesize = tp->nvram_pagesize;
11101         u32 pagemask = pagesize - 1;
11102         u32 nvram_cmd;
11103         u8 *tmp;
11104
11105         tmp = kmalloc(pagesize, GFP_KERNEL);
11106         if (tmp == NULL)
11107                 return -ENOMEM;
11108
11109         while (len) {
11110                 int j;
11111                 u32 phy_addr, page_off, size;
11112
11113                 phy_addr = offset & ~pagemask;
11114
11115                 for (j = 0; j < pagesize; j += 4) {
11116                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11117                                                 (__le32 *) (tmp + j))))
11118                                 break;
11119                 }
11120                 if (ret)
11121                         break;
11122
11123                 page_off = offset & pagemask;
11124                 size = pagesize;
11125                 if (len < size)
11126                         size = len;
11127
11128                 len -= size;
11129
11130                 memcpy(tmp + page_off, buf, size);
11131
11132                 offset = offset + (pagesize - page_off);
11133
11134                 tg3_enable_nvram_access(tp);
11135
11136                 /*
11137                  * Before we can erase the flash page, we need
11138                  * to issue a special "write enable" command.
11139                  */
11140                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11141
11142                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11143                         break;
11144
11145                 /* Erase the target page */
11146                 tw32(NVRAM_ADDR, phy_addr);
11147
11148                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11149                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11150
11151                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11152                         break;
11153
11154                 /* Issue another write enable to start the write. */
11155                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11156
11157                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11158                         break;
11159
11160                 for (j = 0; j < pagesize; j += 4) {
11161                         __be32 data;
11162
11163                         data = *((__be32 *) (tmp + j));
11164                         /* swab32(le32_to_cpu(data)), actually */
11165                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11166
11167                         tw32(NVRAM_ADDR, phy_addr + j);
11168
11169                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11170                                 NVRAM_CMD_WR;
11171
11172                         if (j == 0)
11173                                 nvram_cmd |= NVRAM_CMD_FIRST;
11174                         else if (j == (pagesize - 4))
11175                                 nvram_cmd |= NVRAM_CMD_LAST;
11176
11177                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11178                                 break;
11179                 }
11180                 if (ret)
11181                         break;
11182         }
11183
11184         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11185         tg3_nvram_exec_cmd(tp, nvram_cmd);
11186
11187         kfree(tmp);
11188
11189         return ret;
11190 }
11191
/* offset and length are dword aligned */
/* Write @len bytes to buffered flash (or plain EEPROM-style parts)
 * one 32-bit word at a time.  NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracket
 * each flash page (and the transfer as a whole) so the controller can
 * stage a full page before committing it.  Returns 0 on success or
 * the first command-execution error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* memcpy avoids unaligned access on buf. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First word of a page, or of the whole transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* Last word of a page... */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ...or last word of the whole transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST flash parts (on chips outside the list below) get an
		 * explicit write-enable command before each page write.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11246
11247 /* offset and length are dword aligned */
11248 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11249 {
11250         int ret;
11251
11252         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11253                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11254                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11255                 udelay(40);
11256         }
11257
11258         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11259                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11260         }
11261         else {
11262                 u32 grc_mode;
11263
11264                 ret = tg3_nvram_lock(tp);
11265                 if (ret)
11266                         return ret;
11267
11268                 tg3_enable_nvram_access(tp);
11269                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11270                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11271                         tw32(NVRAM_WRITE1, 0x406);
11272
11273                 grc_mode = tr32(GRC_MODE);
11274                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11275
11276                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11277                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11278
11279                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11280                                 buf);
11281                 }
11282                 else {
11283                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11284                                 buf);
11285                 }
11286
11287                 grc_mode = tr32(GRC_MODE);
11288                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11289
11290                 tg3_disable_nvram_access(tp);
11291                 tg3_nvram_unlock(tp);
11292         }
11293
11294         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11295                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11296                 udelay(40);
11297         }
11298
11299         return ret;
11300 }
11301
/* Maps a board's PCI subsystem vendor/device ID pair to the PHY ID
 * expected on that board.  Used by tg3_phy_probe() as a last-resort
 * fallback when neither the chip's MII registers nor the EEPROM
 * yield a usable PHY ID.  Entries with phy_id == 0 are treated as
 * serdes boards by tg3_phy_probe().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11344
11345 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11346 {
11347         int i;
11348
11349         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11350                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11351                      tp->pdev->subsystem_vendor) &&
11352                     (subsys_id_to_phy_id[i].subsys_devid ==
11353                      tp->pdev->subsystem_device))
11354                         return &subsys_id_to_phy_id[i];
11355         }
11356         return NULL;
11357 }
11358
/* Pull the persistent hardware configuration (PHY ID, LED mode, WOL,
 * ASF/APE enables, serdes vs. copper, RGMII in-band options) out of
 * the NIC's SRAM shadow of the EEPROM config words and fold it into
 * tp->tg3_flags*, tp->phy_id and tp->led_ctrl.  Finishes by
 * registering the device's wakeup capability/enable state with the
 * driver core.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* The 5906 keeps its configuration in VCPU shadow registers
	 * rather than NIC SRAM; read those and skip the SRAM parsing.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}

	/* Only trust the SRAM config area when its signature is valid. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only present with a sane bootcode version on
		 * chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-encoded PHY id fields into the driver's
		 * PHY_ID format (same layout as tg3_phy_probe() builds
		 * from MII_PHYSID1/MII_PHYSID2).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards override the LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Specific Arima boards are exempt from the
			 * write-protect despite setting the bit.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
done:
	/* Tell the PM core what we learned about wakeup support. */
	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
	device_set_wakeup_enable(&tp->pdev->dev,
				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
}
11567
11568 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11569 {
11570         int i;
11571         u32 val;
11572
11573         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11574         tw32(OTP_CTRL, cmd);
11575
11576         /* Wait for up to 1 ms for command to execute. */
11577         for (i = 0; i < 100; i++) {
11578                 val = tr32(OTP_STATUS);
11579                 if (val & OTP_STATUS_CMD_DONE)
11580                         break;
11581                 udelay(10);
11582         }
11583
11584         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11585 }
11586
11587 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11588  * configuration is a 32-bit value that straddles the alignment boundary.
11589  * We do two 32-bit reads and then shift and merge the results.
11590  */
11591 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11592 {
11593         u32 bhalf_otp, thalf_otp;
11594
11595         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11596
11597         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11598                 return 0;
11599
11600         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11601
11602         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11603                 return 0;
11604
11605         thalf_otp = tr32(OTP_READ_DATA);
11606
11607         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11608
11609         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11610                 return 0;
11611
11612         bhalf_otp = tr32(OTP_READ_DATA);
11613
11614         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11615 }
11616
/* Determine the PHY attached to this chip and record its ID in
 * tp->phy_id.  Tries, in order: the phylib path (when enabled), the
 * physical MII ID registers, the ID already loaded from the EEPROM
 * by tg3_get_eeprom_hw_cfg(), and finally the hard-coded subsystem-ID
 * table.  For copper PHYs without ASF/APE, it also ensures a full
 * advertisement mask is programmed and autonegotiation restarted.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry also means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice, presumably because latched
		 * link-status bits make the first read stale.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings also advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		/* Reprogram the advertisement and restart autoneg only
		 * when the PHY is not already advertising everything.
		 */
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when the 5401 branch above ran, err is 0 here
	 * (it returned on error), so this re-runs the 5401 DSP init a
	 * second time.  Looks like a deliberate retry — confirm.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11747
/* Read the board part number into tp->board_part_number.
 *
 * The 256-byte VPD image is fetched either directly from NVRAM (when the
 * NVRAM image carries the tg3 EEPROM magic) or through the PCI VPD
 * capability registers, then parsed for the read-only resource keyword
 * "PN".  On any failure a fixed fallback string is stored instead.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* tg3 EEPROM layout: the VPD block lives at NVRAM offset
		 * 0x100.  Read it a 32-bit word at a time and unpack the
		 * bytes LSB-first.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* NOTE(review): pci_find_capability() returns 0 when the
		 * capability is absent; vpd_cap is used unchecked below —
		 * presumably all devices taking this path expose VPD.
		 * Confirm against the supported device list.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			/* Kick off a VPD read of dword i, then poll the
			 * address register until the device flips bit 15
			 * to signal data-ready (up to ~100ms).
			 */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 = large-resource ID string tag, 0x91 = VPD-W tag:
		 * skip the whole resource (3-byte header + 16-bit
		 * little-endian length).
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything but the VPD-R (read-only data, 0x90) tag here
		 * means the image is malformed.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries: 2-byte keyword + 1-byte len. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				/* NOTE(review): no explicit NUL terminator
				 * is appended — assumes tp->board_part_number
				 * is zeroed at alloc and partno_len < the
				 * field size; verify against tg3.h.
				 */
				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11848
11849 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11850 {
11851         u32 val;
11852
11853         if (tg3_nvram_read_swab(tp, offset, &val) ||
11854             (val & 0xfc000000) != 0x0c000000 ||
11855             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11856             val != 0)
11857                 return 0;
11858
11859         return 1;
11860 }
11861
/* Read firmware version strings out of NVRAM into tp->fw_ver.
 *
 * First copies the 16-byte bootcode version string.  Then, when ASF is
 * enabled (and the APE is not managing the device), locates the ASF
 * initialization firmware via the NVM directory and appends its version
 * after ", ".  Silently returns on any NVRAM read failure, leaving
 * whatever has been written to tp->fw_ver so far.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Only tg3-format EEPROM images carry a version string. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: bootcode image offset; word 0x4: image load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image load address; rebase it to
	 * the image's NVRAM offset and copy the 16-byte version string.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* The ASF firmware version is only meaningful when ASF owns
	 * management and the APE does not.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVM directory for the ASF-init directory entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load address; later parts store it
	 * in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	/* Rebase the ASF version pointer the same way as above. */
	offset += val - start;

	/* NOTE(review): strlen() here assumes the 16 bytes copied above
	 * contain a NUL within tp->fw_ver — confirm fw_ver is zeroed at
	 * allocation so bcnt cannot run past the buffer.
	 */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes of ASF version, truncating to fit. */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how much was copied. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11945
11946 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11947
11948 static int __devinit tg3_get_invariants(struct tg3 *tp)
11949 {
11950         static struct pci_device_id write_reorder_chipsets[] = {
11951                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11952                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11953                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11954                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11955                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11956                              PCI_DEVICE_ID_VIA_8385_0) },
11957                 { },
11958         };
11959         u32 misc_ctrl_reg;
11960         u32 cacheline_sz_reg;
11961         u32 pci_state_reg, grc_misc_cfg;
11962         u32 val;
11963         u16 pci_cmd;
11964         int err, pcie_cap;
11965
11966         /* Force memory write invalidate off.  If we leave it on,
11967          * then on 5700_BX chips we have to enable a workaround.
11968          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11969          * to match the cacheline size.  The Broadcom driver have this
11970          * workaround but turns MWI off all the times so never uses
11971          * it.  This seems to suggest that the workaround is insufficient.
11972          */
11973         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11974         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11975         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11976
11977         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11978          * has the register indirect write enable bit set before
11979          * we try to access any of the MMIO registers.  It is also
11980          * critical that the PCI-X hw workaround situation is decided
11981          * before that as well.
11982          */
11983         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11984                               &misc_ctrl_reg);
11985
11986         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11987                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11988         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11989                 u32 prod_id_asic_rev;
11990
11991                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11992                                       &prod_id_asic_rev);
11993                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11994         }
11995
11996         /* Wrong chip ID in 5752 A0. This code can be removed later
11997          * as A0 is not in production.
11998          */
11999         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12000                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12001
12002         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12003          * we need to disable memory and use config. cycles
12004          * only to access all registers. The 5702/03 chips
12005          * can mistakenly decode the special cycles from the
12006          * ICH chipsets as memory write cycles, causing corruption
12007          * of register and memory space. Only certain ICH bridges
12008          * will drive special cycles with non-zero data during the
12009          * address phase which can fall within the 5703's address
12010          * range. This is not an ICH bug as the PCI spec allows
12011          * non-zero address during special cycles. However, only
12012          * these ICH bridges are known to drive non-zero addresses
12013          * during special cycles.
12014          *
12015          * Since special cycles do not cross PCI bridges, we only
12016          * enable this workaround if the 5703 is on the secondary
12017          * bus of these ICH bridges.
12018          */
12019         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12020             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12021                 static struct tg3_dev_id {
12022                         u32     vendor;
12023                         u32     device;
12024                         u32     rev;
12025                 } ich_chipsets[] = {
12026                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12027                           PCI_ANY_ID },
12028                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12029                           PCI_ANY_ID },
12030                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12031                           0xa },
12032                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12033                           PCI_ANY_ID },
12034                         { },
12035                 };
12036                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12037                 struct pci_dev *bridge = NULL;
12038
12039                 while (pci_id->vendor != 0) {
12040                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
12041                                                 bridge);
12042                         if (!bridge) {
12043                                 pci_id++;
12044                                 continue;
12045                         }
12046                         if (pci_id->rev != PCI_ANY_ID) {
12047                                 if (bridge->revision > pci_id->rev)
12048                                         continue;
12049                         }
12050                         if (bridge->subordinate &&
12051                             (bridge->subordinate->number ==
12052                              tp->pdev->bus->number)) {
12053
12054                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12055                                 pci_dev_put(bridge);
12056                                 break;
12057                         }
12058                 }
12059         }
12060
12061         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12062                 static struct tg3_dev_id {
12063                         u32     vendor;
12064                         u32     device;
12065                 } bridge_chipsets[] = {
12066                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12067                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12068                         { },
12069                 };
12070                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12071                 struct pci_dev *bridge = NULL;
12072
12073                 while (pci_id->vendor != 0) {
12074                         bridge = pci_get_device(pci_id->vendor,
12075                                                 pci_id->device,
12076                                                 bridge);
12077                         if (!bridge) {
12078                                 pci_id++;
12079                                 continue;
12080                         }
12081                         if (bridge->subordinate &&
12082                             (bridge->subordinate->number <=
12083                              tp->pdev->bus->number) &&
12084                             (bridge->subordinate->subordinate >=
12085                              tp->pdev->bus->number)) {
12086                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12087                                 pci_dev_put(bridge);
12088                                 break;
12089                         }
12090                 }
12091         }
12092
12093         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12094          * DMA addresses > 40-bit. This bridge may have other additional
12095          * 57xx devices behind it in some 4-port NIC designs for example.
12096          * Any tg3 device found behind the bridge will also need the 40-bit
12097          * DMA workaround.
12098          */
12099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12100             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12101                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12102                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12103                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12104         }
12105         else {
12106                 struct pci_dev *bridge = NULL;
12107
12108                 do {
12109                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12110                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12111                                                 bridge);
12112                         if (bridge && bridge->subordinate &&
12113                             (bridge->subordinate->number <=
12114                              tp->pdev->bus->number) &&
12115                             (bridge->subordinate->subordinate >=
12116                              tp->pdev->bus->number)) {
12117                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12118                                 pci_dev_put(bridge);
12119                                 break;
12120                         }
12121                 } while (bridge);
12122         }
12123
12124         /* Initialize misc host control in PCI block. */
12125         tp->misc_host_ctrl |= (misc_ctrl_reg &
12126                                MISC_HOST_CTRL_CHIPREV);
12127         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12128                                tp->misc_host_ctrl);
12129
12130         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12131                               &cacheline_sz_reg);
12132
12133         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12134         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12135         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12136         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12137
12138         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12140                 tp->pdev_peer = tg3_find_peer(tp);
12141
12142         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12143             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12146             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12148             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12149             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12150             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12151                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12152
12153         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12154             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12155                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12156
12157         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12158                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12159                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12160                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12161                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12162                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12163                      tp->pdev_peer == tp->pdev))
12164                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12165
12166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12167                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12168                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12169                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12170                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12171                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12172                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12173                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12174                 } else {
12175                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12176                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12177                                 ASIC_REV_5750 &&
12178                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12179                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12180                 }
12181         }
12182
12183         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12184              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12185                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12186
12187         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12188         if (pcie_cap != 0) {
12189                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12190
12191                 pcie_set_readrq(tp->pdev, 4096);
12192
12193                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12194                         u16 lnkctl;
12195
12196                         pci_read_config_word(tp->pdev,
12197                                              pcie_cap + PCI_EXP_LNKCTL,
12198                                              &lnkctl);
12199                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12200                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12201                 }
12202         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12203                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12204
12205         /* If we have an AMD 762 or VIA K8T800 chipset, write
12206          * reordering to the mailbox registers done by the host
12207          * controller can cause major troubles.  We read back from
12208          * every mailbox register write to force the writes to be
12209          * posted to the chip in order.
12210          */
12211         if (pci_dev_present(write_reorder_chipsets) &&
12212             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12213                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12214
12215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12216             tp->pci_lat_timer < 64) {
12217                 tp->pci_lat_timer = 64;
12218
12219                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12220                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12221                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12222                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12223
12224                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12225                                        cacheline_sz_reg);
12226         }
12227
12228         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12229             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12230                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12231                 if (!tp->pcix_cap) {
12232                         printk(KERN_ERR PFX "Cannot find PCI-X "
12233                                             "capability, aborting.\n");
12234                         return -EIO;
12235                 }
12236         }
12237
12238         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12239                               &pci_state_reg);
12240
12241         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12242                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12243
12244                 /* If this is a 5700 BX chipset, and we are in PCI-X
12245                  * mode, enable register write workaround.
12246                  *
12247                  * The workaround is to use indirect register accesses
12248                  * for all chip writes not to mailbox registers.
12249                  */
12250                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12251                         u32 pm_reg;
12252
12253                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12254
12255                         /* The chip can have it's power management PCI config
12256                          * space registers clobbered due to this bug.
12257                          * So explicitly force the chip into D0 here.
12258                          */
12259                         pci_read_config_dword(tp->pdev,
12260                                               tp->pm_cap + PCI_PM_CTRL,
12261                                               &pm_reg);
12262                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12263                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12264                         pci_write_config_dword(tp->pdev,
12265                                                tp->pm_cap + PCI_PM_CTRL,
12266                                                pm_reg);
12267
12268                         /* Also, force SERR#/PERR# in PCI command. */
12269                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12270                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12271                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12272                 }
12273         }
12274
12275         /* 5700 BX chips need to have their TX producer index mailboxes
12276          * written twice to workaround a bug.
12277          */
12278         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12279                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12280
12281         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12282                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12283         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12284                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12285
12286         /* Chip-specific fixup from Broadcom driver */
12287         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12288             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12289                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12290                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12291         }
12292
12293         /* Default fast path register access methods */
12294         tp->read32 = tg3_read32;
12295         tp->write32 = tg3_write32;
12296         tp->read32_mbox = tg3_read32;
12297         tp->write32_mbox = tg3_write32;
12298         tp->write32_tx_mbox = tg3_write32;
12299         tp->write32_rx_mbox = tg3_write32;
12300
12301         /* Various workaround register access methods */
12302         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12303                 tp->write32 = tg3_write_indirect_reg32;
12304         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12305                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12306                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12307                 /*
12308                  * Back to back register writes can cause problems on these
12309                  * chips, the workaround is to read back all reg writes
12310                  * except those to mailbox regs.
12311                  *
12312                  * See tg3_write_indirect_reg32().
12313                  */
12314                 tp->write32 = tg3_write_flush_reg32;
12315         }
12316
12317
12318         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12319             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12320                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12321                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12322                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12323         }
12324
12325         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12326                 tp->read32 = tg3_read_indirect_reg32;
12327                 tp->write32 = tg3_write_indirect_reg32;
12328                 tp->read32_mbox = tg3_read_indirect_mbox;
12329                 tp->write32_mbox = tg3_write_indirect_mbox;
12330                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12331                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12332
12333                 iounmap(tp->regs);
12334                 tp->regs = NULL;
12335
12336                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12337                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12338                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12339         }
12340         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12341                 tp->read32_mbox = tg3_read32_mbox_5906;
12342                 tp->write32_mbox = tg3_write32_mbox_5906;
12343                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12344                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12345         }
12346
12347         if (tp->write32 == tg3_write_indirect_reg32 ||
12348             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12349              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12350               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12351                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12352
12353         /* Get eeprom hw config before calling tg3_set_power_state().
12354          * In particular, the TG3_FLG2_IS_NIC flag must be
12355          * determined before calling tg3_set_power_state() so that
12356          * we know whether or not to switch out of Vaux power.
12357          * When the flag is set, it means that GPIO1 is used for eeprom
12358          * write protect and also implies that it is a LOM where GPIOs
12359          * are not used to switch power.
12360          */
12361         tg3_get_eeprom_hw_cfg(tp);
12362
12363         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12364                 /* Allow reads and writes to the
12365                  * APE register and memory space.
12366                  */
12367                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12368                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12369                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12370                                        pci_state_reg);
12371         }
12372
12373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12374             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12376                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12377
12378         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12379          * GPIO1 driven high will bring 5700's external PHY out of reset.
12380          * It is also used as eeprom write protect on LOMs.
12381          */
12382         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12383         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12384             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12385                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12386                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12387         /* Unused GPIO3 must be driven as output on 5752 because there
12388          * are no pull-up resistors on unused GPIO pins.
12389          */
12390         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12391                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12392
12393         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12394                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12395
12396         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12397                 /* Turn off the debug UART. */
12398                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12399                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12400                         /* Keep VMain power. */
12401                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12402                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12403         }
12404
12405         /* Force the chip into D0. */
12406         err = tg3_set_power_state(tp, PCI_D0);
12407         if (err) {
12408                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12409                        pci_name(tp->pdev));
12410                 return err;
12411         }
12412
12413         /* 5700 B0 chips do not support checksumming correctly due
12414          * to hardware bugs.
12415          */
12416         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12417                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12418
12419         /* Derive initial jumbo mode from MTU assigned in
12420          * ether_setup() via the alloc_etherdev() call
12421          */
12422         if (tp->dev->mtu > ETH_DATA_LEN &&
12423             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12424                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12425
12426         /* Determine WakeOnLan speed to use. */
12427         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12428             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12429             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12430             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12431                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12432         } else {
12433                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12434         }
12435
12436         /* A few boards don't want Ethernet@WireSpeed phy feature */
12437         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12438             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12439              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12440              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12441             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12442             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12443                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12444
12445         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12446             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12447                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12448         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12449                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12450
12451         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12452                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12453                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12454                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12455                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12456                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12457                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12458                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12459                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12460                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12461                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12462                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12463                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12464         }
12465
12466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12467             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12468                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12469                 if (tp->phy_otp == 0)
12470                         tp->phy_otp = TG3_OTP_DEFAULT;
12471         }
12472
12473         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12474                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12475         else
12476                 tp->mi_mode = MAC_MI_MODE_BASE;
12477
12478         tp->coalesce_mode = 0;
12479         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12480             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12481                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12482
12483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12484                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12485
12486         err = tg3_mdio_init(tp);
12487         if (err)
12488                 return err;
12489
12490         /* Initialize data/descriptor byte/word swapping. */
12491         val = tr32(GRC_MODE);
12492         val &= GRC_MODE_HOST_STACKUP;
12493         tw32(GRC_MODE, val | tp->grc_mode);
12494
12495         tg3_switch_clocks(tp);
12496
12497         /* Clear this out for sanity. */
12498         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12499
12500         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12501                               &pci_state_reg);
12502         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12503             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12504                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12505
12506                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12507                     chiprevid == CHIPREV_ID_5701_B0 ||
12508                     chiprevid == CHIPREV_ID_5701_B2 ||
12509                     chiprevid == CHIPREV_ID_5701_B5) {
12510                         void __iomem *sram_base;
12511
12512                         /* Write some dummy words into the SRAM status block
12513                          * area, see if it reads back correctly.  If the return
12514                          * value is bad, force enable the PCIX workaround.
12515                          */
12516                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12517
12518                         writel(0x00000000, sram_base);
12519                         writel(0x00000000, sram_base + 4);
12520                         writel(0xffffffff, sram_base + 4);
12521                         if (readl(sram_base) != 0x00000000)
12522                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12523                 }
12524         }
12525
12526         udelay(50);
12527         tg3_nvram_init(tp);
12528
12529         grc_misc_cfg = tr32(GRC_MISC_CFG);
12530         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12531
12532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12533             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12534              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12535                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12536
12537         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12538             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12539                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12540         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12541                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12542                                       HOSTCC_MODE_CLRTICK_TXBD);
12543
12544                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12545                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12546                                        tp->misc_host_ctrl);
12547         }
12548
12549         /* Preserve the APE MAC_MODE bits */
12550         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12551                 tp->mac_mode = tr32(MAC_MODE) |
12552                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12553         else
12554                 tp->mac_mode = TG3_DEF_MAC_MODE;
12555
12556         /* these are limited to 10/100 only */
12557         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12558              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12559             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12560              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12561              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12562               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12563               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12564             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12565              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12566               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12567               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12568             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12569                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12570
12571         err = tg3_phy_probe(tp);
12572         if (err) {
12573                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12574                        pci_name(tp->pdev), err);
12575                 /* ... but do not return immediately ... */
12576                 tg3_mdio_fini(tp);
12577         }
12578
12579         tg3_read_partno(tp);
12580         tg3_read_fw_ver(tp);
12581
12582         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12583                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12584         } else {
12585                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12586                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12587                 else
12588                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12589         }
12590
12591         /* 5700 {AX,BX} chips have a broken status block link
12592          * change bit implementation, so we must use the
12593          * status register in those cases.
12594          */
12595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12596                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12597         else
12598                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12599
12600         /* The led_ctrl is set during tg3_phy_probe, here we might
12601          * have to force the link status polling mechanism based
12602          * upon subsystem IDs.
12603          */
12604         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12605             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12606             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12607                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12608                                   TG3_FLAG_USE_LINKCHG_REG);
12609         }
12610
12611         /* For all SERDES we poll the MAC status register. */
12612         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12613                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12614         else
12615                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12616
12617         tp->rx_offset = 2;
12618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12619             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12620                 tp->rx_offset = 0;
12621
12622         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12623
12624         /* Increment the rx prod index on the rx std ring by at most
12625          * 8 for these chips to workaround hw errata.
12626          */
12627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12628             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12629             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12630                 tp->rx_std_max_post = 8;
12631
12632         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12633                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12634                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12635
12636         return err;
12637 }
12638
12639 #ifdef CONFIG_SPARC
12640 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12641 {
12642         struct net_device *dev = tp->dev;
12643         struct pci_dev *pdev = tp->pdev;
12644         struct device_node *dp = pci_device_to_OF_node(pdev);
12645         const unsigned char *addr;
12646         int len;
12647
12648         addr = of_get_property(dp, "local-mac-address", &len);
12649         if (addr && len == 6) {
12650                 memcpy(dev->dev_addr, addr, 6);
12651                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12652                 return 0;
12653         }
12654         return -ENODEV;
12655 }
12656
12657 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12658 {
12659         struct net_device *dev = tp->dev;
12660
12661         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12662         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12663         return 0;
12664 }
12665 #endif
12666
/* Determine the device's permanent MAC address and store it in both
 * dev->dev_addr and dev->perm_addr.
 *
 * Sources are tried in decreasing order of preference:
 *   1. (SPARC only) the firmware "local-mac-address" OF property;
 *   2. the MAC address mailbox in NIC SRAM, written by bootcode;
 *   3. NVRAM, at a chip-dependent byte offset;
 *   4. the live MAC_ADDR_0_HIGH/LOW hardware registers;
 *   5. (SPARC only) the system IDPROM as a last resort.
 *
 * Returns 0 on success, -EINVAL when no source yields a valid
 * ethernet address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM location of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC chips: the second function's address lives
		 * at NVRAM offset 0xcc instead.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be acquired,
		 * reset the NVRAM state machine; otherwise release the
		 * lock we just took.  NOTE(review): presumably this
		 * recovers from a lock left held by the other port --
		 * confirm against the NVRAM arbitration scheme.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* The upper 16 bits hold 0x484b (ASCII "HK") when an address
	 * has been stored in the mailbox; presumably a bootcode
	 * signature.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			/* NOTE(review): the shifts here differ from the
			 * SRAM mailbox case above, implying NVRAM words
			 * use the opposite byte order -- confirm against
			 * the NVRAM layout before changing.
			 */
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Fall back to the machine-wide IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12741
/* Burst-boundary goals for tg3_calc_dma_bndry(): break DMA bursts at
 * every cache line, or only at multiples of the cache line size.
 */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Fold DMA read/write burst-boundary bits into @val (the value that
 * will be written to TG3PCI_DMA_RW_CTRL), based on the PCI cache
 * line size register and the host architecture's tolerance for DMA
 * bursts that cross cache-line boundaries.  Returns the updated
 * register value; @val is returned unchanged on chips where the
 * boundary bits have no effect or on architectures with no goal.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* unset register: assume worst case */
	else
		cacheline_size = (int) byte * 4;	/* register unit is 32-bit words */

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture boundary policy. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;	/* no restriction needed on this architecture */
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary encodings differ from conventional PCI. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only write-side boundary control is available
		 * (see the comment above).
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: single-cacheline goal uses the exact
		 * cache line size; otherwise fall through to a larger
		 * boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12881
/* Perform one DMA transfer of @size bytes between the host buffer at
 * @buf_dma and NIC-internal SRAM, driven by a hand-built internal
 * buffer descriptor placed in the chip's DMA descriptor pool.
 *
 * @to_device != 0 runs the read-DMA engine (host memory -> NIC);
 * otherwise the write-DMA engine (NIC -> host memory).
 *
 * Returns 0 when the completion FIFO reports the descriptor done,
 * -ENODEV if it does not complete within ~4ms of polling.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the DMA completion FIFO pointers and engine status. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer; the NIC-side target is SRAM 0x2100. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* NOTE(review): presumably (13 << 8) | 2 encodes the
		 * completion/source queue ids for the read-DMA engine --
		 * confirm against the internal descriptor layout.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;	/* NOTE(review): meaning undocumented here -- confirm */

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window registers in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO: up to 40 * 100us = ~4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12962
12963 #define TEST_BUFFER_SIZE        0x2000
12964
12965 static int __devinit tg3_test_dma(struct tg3 *tp)
12966 {
12967         dma_addr_t buf_dma;
12968         u32 *buf, saved_dma_rwctrl;
12969         int ret;
12970
12971         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12972         if (!buf) {
12973                 ret = -ENOMEM;
12974                 goto out_nofree;
12975         }
12976
12977         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12978                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12979
12980         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12981
12982         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12983                 /* DMA read watermark not used on PCIE */
12984                 tp->dma_rwctrl |= 0x00180000;
12985         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12986                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12987                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12988                         tp->dma_rwctrl |= 0x003f0000;
12989                 else
12990                         tp->dma_rwctrl |= 0x003f000f;
12991         } else {
12992                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12993                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12994                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12995                         u32 read_water = 0x7;
12996
12997                         /* If the 5704 is behind the EPB bridge, we can
12998                          * do the less restrictive ONE_DMA workaround for
12999                          * better performance.
13000                          */
13001                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13002                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13003                                 tp->dma_rwctrl |= 0x8000;
13004                         else if (ccval == 0x6 || ccval == 0x7)
13005                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13006
13007                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13008                                 read_water = 4;
13009                         /* Set bit 23 to enable PCIX hw bug fix */
13010                         tp->dma_rwctrl |=
13011                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13012                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13013                                 (1 << 23);
13014                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13015                         /* 5780 always in PCIX mode */
13016                         tp->dma_rwctrl |= 0x00144000;
13017                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13018                         /* 5714 always in PCIX mode */
13019                         tp->dma_rwctrl |= 0x00148000;
13020                 } else {
13021                         tp->dma_rwctrl |= 0x001b000f;
13022                 }
13023         }
13024
13025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13026             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13027                 tp->dma_rwctrl &= 0xfffffff0;
13028
13029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13031                 /* Remove this if it causes problems for some boards. */
13032                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13033
13034                 /* On 5700/5701 chips, we need to set this bit.
13035                  * Otherwise the chip will issue cacheline transactions
13036                  * to streamable DMA memory with not all the byte
13037                  * enables turned on.  This is an error on several
13038                  * RISC PCI controllers, in particular sparc64.
13039                  *
13040                  * On 5703/5704 chips, this bit has been reassigned
13041                  * a different meaning.  In particular, it is used
13042                  * on those chips to enable a PCI-X workaround.
13043                  */
13044                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13045         }
13046
13047         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13048
13049 #if 0
13050         /* Unneeded, already done by tg3_get_invariants.  */
13051         tg3_switch_clocks(tp);
13052 #endif
13053
13054         ret = 0;
13055         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13056             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13057                 goto out;
13058
13059         /* It is best to perform DMA test with maximum write burst size
13060          * to expose the 5700/5701 write DMA bug.
13061          */
13062         saved_dma_rwctrl = tp->dma_rwctrl;
13063         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13064         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13065
13066         while (1) {
13067                 u32 *p = buf, i;
13068
13069                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13070                         p[i] = i;
13071
13072                 /* Send the buffer to the chip. */
13073                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13074                 if (ret) {
13075                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13076                         break;
13077                 }
13078
13079 #if 0
13080                 /* validate data reached card RAM correctly. */
13081                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13082                         u32 val;
13083                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
13084                         if (le32_to_cpu(val) != p[i]) {
13085                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
13086                                 /* ret = -ENODEV here? */
13087                         }
13088                         p[i] = 0;
13089                 }
13090 #endif
13091                 /* Now read it back. */
13092                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13093                 if (ret) {
13094                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13095
13096                         break;
13097                 }
13098
13099                 /* Verify it. */
13100                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13101                         if (p[i] == i)
13102                                 continue;
13103
13104                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13105                             DMA_RWCTRL_WRITE_BNDRY_16) {
13106                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13107                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13108                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13109                                 break;
13110                         } else {
13111                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13112                                 ret = -ENODEV;
13113                                 goto out;
13114                         }
13115                 }
13116
13117                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13118                         /* Success. */
13119                         ret = 0;
13120                         break;
13121                 }
13122         }
13123         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13124             DMA_RWCTRL_WRITE_BNDRY_16) {
13125                 static struct pci_device_id dma_wait_state_chipsets[] = {
13126                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13127                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13128                         { },
13129                 };
13130
13131                 /* DMA test passed without adjusting DMA boundary,
13132                  * now look for chipsets that are known to expose the
13133                  * DMA bug without failing the test.
13134                  */
13135                 if (pci_dev_present(dma_wait_state_chipsets)) {
13136                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13137                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13138                 }
13139                 else
13140                         /* Safe to use the calculated DMA boundary. */
13141                         tp->dma_rwctrl = saved_dma_rwctrl;
13142
13143                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13144         }
13145
13146 out:
13147         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13148 out_nofree:
13149         return ret;
13150 }
13151
13152 static void __devinit tg3_init_link_config(struct tg3 *tp)
13153 {
13154         tp->link_config.advertising =
13155                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13156                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13157                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13158                  ADVERTISED_Autoneg | ADVERTISED_MII);
13159         tp->link_config.speed = SPEED_INVALID;
13160         tp->link_config.duplex = DUPLEX_INVALID;
13161         tp->link_config.autoneg = AUTONEG_ENABLE;
13162         tp->link_config.active_speed = SPEED_INVALID;
13163         tp->link_config.active_duplex = DUPLEX_INVALID;
13164         tp->link_config.phy_is_low_power = 0;
13165         tp->link_config.orig_speed = SPEED_INVALID;
13166         tp->link_config.orig_duplex = DUPLEX_INVALID;
13167         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13168 }
13169
13170 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13171 {
13172         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13173                 tp->bufmgr_config.mbuf_read_dma_low_water =
13174                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13175                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13176                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13177                 tp->bufmgr_config.mbuf_high_water =
13178                         DEFAULT_MB_HIGH_WATER_5705;
13179                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13180                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13181                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13182                         tp->bufmgr_config.mbuf_high_water =
13183                                 DEFAULT_MB_HIGH_WATER_5906;
13184                 }
13185
13186                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13187                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13188                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13189                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13190                 tp->bufmgr_config.mbuf_high_water_jumbo =
13191                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13192         } else {
13193                 tp->bufmgr_config.mbuf_read_dma_low_water =
13194                         DEFAULT_MB_RDMA_LOW_WATER;
13195                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13196                         DEFAULT_MB_MACRX_LOW_WATER;
13197                 tp->bufmgr_config.mbuf_high_water =
13198                         DEFAULT_MB_HIGH_WATER;
13199
13200                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13201                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13202                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13203                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13204                 tp->bufmgr_config.mbuf_high_water_jumbo =
13205                         DEFAULT_MB_HIGH_WATER_JUMBO;
13206         }
13207
13208         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13209         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13210 }
13211
13212 static char * __devinit tg3_phy_string(struct tg3 *tp)
13213 {
13214         switch (tp->phy_id & PHY_ID_MASK) {
13215         case PHY_ID_BCM5400:    return "5400";
13216         case PHY_ID_BCM5401:    return "5401";
13217         case PHY_ID_BCM5411:    return "5411";
13218         case PHY_ID_BCM5701:    return "5701";
13219         case PHY_ID_BCM5703:    return "5703";
13220         case PHY_ID_BCM5704:    return "5704";
13221         case PHY_ID_BCM5705:    return "5705";
13222         case PHY_ID_BCM5750:    return "5750";
13223         case PHY_ID_BCM5752:    return "5752";
13224         case PHY_ID_BCM5714:    return "5714";
13225         case PHY_ID_BCM5780:    return "5780";
13226         case PHY_ID_BCM5755:    return "5755";
13227         case PHY_ID_BCM5787:    return "5787";
13228         case PHY_ID_BCM5784:    return "5784";
13229         case PHY_ID_BCM5756:    return "5722/5756";
13230         case PHY_ID_BCM5906:    return "5906";
13231         case PHY_ID_BCM5761:    return "5761";
13232         case PHY_ID_BCM8002:    return "8002/serdes";
13233         case 0:                 return "serdes";
13234         default:                return "unknown";
13235         }
13236 }
13237
13238 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13239 {
13240         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13241                 strcpy(str, "PCI Express");
13242                 return str;
13243         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13244                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13245
13246                 strcpy(str, "PCIX:");
13247
13248                 if ((clock_ctrl == 7) ||
13249                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13250                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13251                         strcat(str, "133MHz");
13252                 else if (clock_ctrl == 0)
13253                         strcat(str, "33MHz");
13254                 else if (clock_ctrl == 2)
13255                         strcat(str, "50MHz");
13256                 else if (clock_ctrl == 4)
13257                         strcat(str, "66MHz");
13258                 else if (clock_ctrl == 6)
13259                         strcat(str, "100MHz");
13260         } else {
13261                 strcpy(str, "PCI:");
13262                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13263                         strcat(str, "66MHz");
13264                 else
13265                         strcat(str, "33MHz");
13266         }
13267         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13268                 strcat(str, ":32-bit");
13269         else
13270                 strcat(str, ":64-bit");
13271         return str;
13272 }
13273
13274 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13275 {
13276         struct pci_dev *peer;
13277         unsigned int func, devnr = tp->pdev->devfn & ~7;
13278
13279         for (func = 0; func < 8; func++) {
13280                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13281                 if (peer && peer != tp->pdev)
13282                         break;
13283                 pci_dev_put(peer);
13284         }
13285         /* 5704 can be configured in single-port mode, set peer to
13286          * tp->pdev in that case.
13287          */
13288         if (!peer) {
13289                 peer = tp->pdev;
13290                 return peer;
13291         }
13292
13293         /*
13294          * We don't need to keep the refcount elevated; there's no way
13295          * to remove one half of this device without removing the other
13296          */
13297         pci_dev_put(peer);
13298
13299         return peer;
13300 }
13301
13302 static void __devinit tg3_init_coal(struct tg3 *tp)
13303 {
13304         struct ethtool_coalesce *ec = &tp->coal;
13305
13306         memset(ec, 0, sizeof(*ec));
13307         ec->cmd = ETHTOOL_GCOALESCE;
13308         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13309         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13310         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13311         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13312         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13313         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13314         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13315         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13316         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13317
13318         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13319                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13320                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13321                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13322                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13323                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13324         }
13325
13326         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13327                 ec->rx_coalesce_usecs_irq = 0;
13328                 ec->tx_coalesce_usecs_irq = 0;
13329                 ec->stats_block_coalesce_usecs = 0;
13330         }
13331 }
13332
/* Standard net_device callbacks; used for chips whose DMA engine needs
 * no transmit workaround (see the ASIC_REV list in tg3_init_one()).
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13351
/* Same callback set as tg3_netdev_ops, except the transmit path goes
 * through tg3_start_xmit_dma_bug() for chips with a DMA erratum.
 */
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13370
/* tg3_init_one - PCI probe entry point.
 *
 * Enables and maps the device, allocates and populates the net_device /
 * struct tg3 pair, discovers chip capabilities, picks DMA masks, runs
 * the DMA self-test and finally registers the interface.  Returns 0 on
 * success or a negative errno; on failure every resource acquired so
 * far is released via the chained err_out_* labels at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;	/* print banner only once */
	resource_size_t tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];				/* scratch for tg3_bus_string() */
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* net_device with struct tg3 as its private area. */
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register window (BAR 0). */
	dev->mem_start = pci_resource_start(pdev, BAR_0);
	tg3reg_len = pci_resource_len(pdev, BAR_0);
	dev->mem_end = dev->mem_start + tg3reg_len;

	tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Chips in this list use the normal transmit path; all others
	 * go through tg3_start_xmit_dma_bug().
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;


	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wider mask was rejected. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO is always capable;
	 * a handful of chips/configs can't do firmware TSO either;
	 * the rest can do firmware TSO but carry the TSO_BUG flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO gets a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* APE-enabled chips expose a second register window in BAR 2. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
		(((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwinding: each label releases one more resource, in the
	 * reverse order of acquisition above.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13719
13720 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13721 {
13722         struct net_device *dev = pci_get_drvdata(pdev);
13723
13724         if (dev) {
13725                 struct tg3 *tp = netdev_priv(dev);
13726
13727                 flush_scheduled_work();
13728
13729                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13730                         tg3_phy_fini(tp);
13731                         tg3_mdio_fini(tp);
13732                 }
13733
13734                 unregister_netdev(dev);
13735                 if (tp->aperegs) {
13736                         iounmap(tp->aperegs);
13737                         tp->aperegs = NULL;
13738                 }
13739                 if (tp->regs) {
13740                         iounmap(tp->regs);
13741                         tp->regs = NULL;
13742                 }
13743                 free_netdev(dev);
13744                 pci_release_regions(pdev);
13745                 pci_disable_device(pdev);
13746                 pci_set_drvdata(pdev, NULL);
13747         }
13748 }
13749
/* tg3_suspend - PCI power-management suspend hook.
 *
 * Quiesces the interface (work queue, PHY, NAPI, timer, interrupts),
 * halts the chip and moves it to the target low-power state.  If the
 * power transition fails, the hardware and interface are restarted so
 * the device remains usable, and the error is returned to the caller.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Force a full re-init on resume. */
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		/* Could not enter the low-power state: bring the chip
		 * and the interface back up so the device stays usable,
		 * then report the original error.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY only if the hardware restart worked. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13811
/* tg3_resume - PCI power-management resume hook.
 *
 * Restores PCI config space, powers the chip back to D0 and, when the
 * interface was running at suspend time, restarts the hardware, timer
 * and PHY.  Returns 0 on success or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the hardware restart succeeded. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13849
/* PCI driver glue: binds the probe/remove/PM hooks above to the device
 * id table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13858
/* Module load: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
13863
/* Module unload: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
13868
/* Hook module load/unload to the init/cleanup routines above. */
module_init(tg3_init);
module_exit(tg3_cleanup);