defxx.c: dfx_bus_init() is __devexit not __devinit
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.85"
68 #define DRV_MODULE_RELDATE      "October 18, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
/* Probe-time banner string; __devinitdata allows the kernel to discard
 * it after device initialization on configurations without hotplug.
 */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* categories; -1 selects TG3_DEF_MSG_ENABLE. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs this driver claims.  Exported through
 * MODULE_DEVICE_TABLE below so userspace can autoload the module when a
 * matching NIC appears.  The empty entry terminates the table.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* Counter names reported by "ethtool -S", one per u64 statistic.
 * NOTE(review): TG3_NUM_STATS is derived from sizeof(struct
 * tg3_ethtool_stats), so this list presumably must stay in the same
 * order as the counters in that struct — verify before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* Labels for the TG3_NUM_TEST ethtool self-tests reported to userspace. */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
/* Plain posted MMIO write to register offset @off in the direct-mapped
 * register BAR.  Callers needing the write flushed to the chip use
 * tg3_write_flush_reg32() / the tw32_f() macro instead.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* Posted write into the APE register block (separately mapped at
 * tp->aperegs).
 */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a chip register indirectly through the PCI config-space window:
 * the target offset is loaded into TG3PCI_REG_BASE_ADDR, then the data
 * into TG3PCI_REG_DATA.  indirect_lock serializes the two-step sequence
 * so concurrent users cannot interleave their address/data pairs.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* MMIO write followed by a read-back of the same register, forcing the
 * posted write out to the device before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
/* Read a chip register indirectly through the PCI config-space window;
 * see tg3_write_indirect_reg32() for the addressing scheme.  The lock
 * keeps the address write and data read atomic with respect to other
 * indirect accesses.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Mailbox write for chips running in indirect-access mode.
 *
 * Two mailboxes (RX-return consumer index, standard-ring producer
 * index) have dedicated config-space aliases and are written directly.
 * All other mailboxes go through the indirect register window at
 * off + 0x5600 (NOTE(review): presumably the mailbox region's offset
 * inside the window — confirm against the register map).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Mailbox read for chips running in indirect-access mode; mirrors the
 * window addressing used by tg3_write_indirect_mbox() (off + 0x5600).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
405 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
406 {
407         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409                 /* Non-posted methods */
410                 tp->write32(tp, off, val);
411         else {
412                 /* Posted method */
413                 tg3_write32(tp, off, val);
414                 if (usec_wait)
415                         udelay(usec_wait);
416                 tp->read32(tp, off);
417         }
418         /* Wait again after the read for the posted method to guarantee that
419          * the wait time is met.
420          */
421         if (usec_wait)
422                 udelay(usec_wait);
423 }
424
/* Mailbox write that flushes via read-back, except on chips where the
 * read-back must be skipped (mailbox write-reorder bug or the ICH
 * workaround is in effect).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* TX mailbox write with two chip-bug workarounds: chips flagged with
 * TXD_MBOX_HWBUG need the value written twice, and chips flagged with
 * MBOX_WRITE_REORDER need a read-back so the write is not reordered.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
/* 5906 mailbox write: mailboxes live in the GRC mailbox area of the
 * register BAR on this chip.
 */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
/* Register access shorthands.  All of these expect a local variable
 * named "tp" (struct tg3 *) in the caller's scope.  The "_f" variants
 * flush the posted write; "tw32_wait_f" additionally enforces a delay
 * for registers that are unsafe to read back immediately.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
463
/* Write one word into NIC on-board SRAM at @off through the memory
 * window.
 *
 * On the 5906, offsets in the statistics-block range are silently
 * ignored.  The window is driven either through PCI config space
 * (SRAM_USE_CONFIG) or through flushed MMIO writes; in both cases the
 * window base is parked back at zero before the lock is dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read one word of NIC on-board SRAM at @off through the memory window;
 * mirrors tg3_write_mem().
 *
 * On the 5906, reads from the statistics-block range return 0 instead
 * of touching the window.  The window base is always restored to zero
 * before the lock is dropped.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Disable chip interrupts: mask the PCI interrupt line in misc host
 * control, then write 1 to interrupt mailbox 0 (the value the indirect
 * path also recognizes as "interrupts disabled").
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Trigger an interrupt if the status block already has an update
 * pending (non-tagged mode); otherwise kick the coalescing engine so
 * it re-evaluates immediately.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts.
 *
 * irq_sync must be observed as zero before the unmask takes effect,
 * hence the wmb().  The mailbox write carries last_tag << 24
 * (NOTE(review): presumably acking all work up to last_tag — confirm);
 * 1SHOT_MSI chips need that mailbox write issued twice.  Finally,
 * tg3_cond_int() forces an interrupt if work was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
635 /* tg3_restart_ints
636  *  similar to tg3_enable_ints, but it accurately determines whether there
637  *  is new work pending and can return without flushing the PIO write
638  *  which reenables interrupts
639  */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	/* Order the mailbox write before any subsequent MMIO from other
	 * CPUs on architectures that need it.
	 */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: refresh trans_start first so the netdev
 * watchdog does not fire a spurious TX timeout while the queue is
 * disabled, then stop NAPI polling and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path after a reconfiguration: wake the TX queue,
 * re-enable NAPI, and force SD_STATUS_UPDATED so tg3_enable_ints()
 * raises an interrupt and any pending work is picked up immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the chip core clock back to its normal source.
 *
 * Skipped entirely on CPMU-equipped and 5780-class chips.  Only the
 * CLKRUN bits and the low 5 bits of CLOCK_CTRL are preserved; on
 * 5705-plus parts the 625MHz core bit is kept if it was set, while on
 * older parts running from the 44MHz core the ALTCLK bits are dropped
 * in two timed steps (each write settles for 40us) before the final
 * value is written.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707 #define PHY_BUSY_LOOPS  5000
708
/* Read PHY register @reg over the MAC's MI (MDIO) interface.
 *
 * Returns 0 with the result in *val, or -EBUSY if the MI transaction
 * does not complete within PHY_BUSY_LOOPS polls.  Hardware autopolling
 * is paused for the duration of the transaction and restored before
 * returning.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Manual MI access conflicts with autopolling; pause it. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy cleared: settle briefly and re-read to
			 * pick up the returned data bits.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
758 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
759 {
760         u32 frame_val;
761         unsigned int loops;
762         int ret;
763
764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
766                 return 0;
767
768         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
769                 tw32_f(MAC_MI_MODE,
770                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
771                 udelay(80);
772         }
773
774         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775                       MI_COM_PHY_ADDR_MASK);
776         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777                       MI_COM_REG_ADDR_MASK);
778         frame_val |= (val & MI_COM_DATA_MASK);
779         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
780
781         tw32_f(MAC_MI_COM, frame_val);
782
783         loops = PHY_BUSY_LOOPS;
784         while (loops != 0) {
785                 udelay(10);
786                 frame_val = tr32(MAC_MI_COM);
787                 if ((frame_val & MI_COM_BUSY) == 0) {
788                         udelay(5);
789                         frame_val = tr32(MAC_MI_COM);
790                         break;
791                 }
792                 loops -= 1;
793         }
794
795         ret = -EBUSY;
796         if (loops != 0)
797                 ret = 0;
798
799         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800                 tw32_f(MAC_MI_MODE, tp->mi_mode);
801                 udelay(80);
802         }
803
804         return ret;
805 }
806
/* Enable or disable automatic MDI/MDI-X crossover on the copper PHY.
 *
 * No-op on pre-5705 chips and on any SERDES configuration.  The 5906's
 * embedded PHY exposes the MDIX bit behind the EPHY shadow-register
 * scheme; other PHYs toggle the force-AMDIX bit through the AUX
 * control misc shadow (write-enable bit must be set for the write to
 * take effect).
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Open the EPHY shadow window, flip the MDIX bit, then
		 * restore the original test-register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
844
845 static void tg3_phy_set_wirespeed(struct tg3 *tp)
846 {
847         u32 val;
848
849         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850                 return;
851
852         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855                              (val | (1 << 15) | (1 << 4)));
856 }
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is functioning.
 *
 * Returns 0 when all four channels verify.  On a macro-done timeout
 * the function sets *resetp = 1 so the caller will reset the PHY and
 * retry; on a data miscompare it issues a fixed DSP register sequence
 * (0x000b / 0x4001 / 0x4005) and returns -EBUSY without requesting a
 * reset.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP address, then load the six
		 * pattern words through the RW port.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back as low/high word pairs and
		 * compare against what was written (low word masked to
		 * 15 bits, high word to 4 bits).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
/* PHY reset workaround sequence for 5703/5704/5705 chips.
 *
 * Up to 10 times: reset the PHY (when requested), disable the
 * transmitter and interrupt, force 1000/full master mode, and
 * write/verify the DSP test patterns.  Afterwards the channel patterns
 * are cleared and the original MII_TG3_CTRL value is restored.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): if the MII_TG3_EXT_CTRL or MII_TG3_CTRL reads fail on
 * every retry (the "continue" paths), reg32 and phy9_orig are used
 * uninitialized after the loop — confirm whether that path can occur
 * in practice.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* A verify failure with do_phy_reset set triggers a
		 * fresh BMCR reset on the next pass.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the MII_TG3_CTRL value saved before forcing master
	 * mode above.
	 */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits
	 * set above); if even this read fails, report -EBUSY unless a
	 * prior error is already pending.
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* Reset the tigon3 PHY and re-apply all per-chip workarounds.
 *
 * (An older version of this comment mentioned a FORCE argument; the
 * function takes none.)
 *
 * Returns 0 on success, -EBUSY/-errno on MDIO or reset failure.  If
 * the link was up, carrier is dropped and the state change reported
 * before the reset is performed.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* 5906: take the EPHY out of IDDQ (low-power) mode first. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Double read of BMSR — presumably to clear latched status
	 * bits; TODO confirm.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the extended DSP test-pattern reset
	 * sequence instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset per-bug DSP fixups, keyed off the PHY bug flags
	 * set elsewhere in the driver.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally — preserve. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1177
/* Drive the GPIO pins that select between Vmain and Vaux auxiliary
 * power, according to whether this device (or, on 5704/5714 dual-port
 * boards, its peer) needs power for WOL or ASF.
 *
 * No-op on non-NIC (e.g. LOM) configurations.  On dual-port chips the
 * GPIOs are shared, so the peer's flags are consulted and the function
 * bails out early if the peer has already completed init (it owns the
 * GPIOs in that case).
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Either port needing WOL or ASF keeps aux power configured. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Peer already owns the shared GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step GPIO sequence; each write settles
			 * for 100us via tw32_wait_f.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power: release it (not on
		 * 5700/5701, which skip this path entirely).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1273
1274 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1275 {
1276         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1277                 return 1;
1278         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1279                 if (speed != SPEED_10)
1280                         return 1;
1281         } else if (speed == SPEED_10)
1282                 return 1;
1283
1284         return 0;
1285 }
1286
1287 static int tg3_setup_phy(struct tg3 *, int);
1288
1289 #define RESET_KIND_SHUTDOWN     0
1290 #define RESET_KIND_INIT         1
1291 #define RESET_KIND_SUSPEND      2
1292
1293 static void tg3_write_sig_post_reset(struct tg3 *, int);
1294 static int tg3_halt_cpu(struct tg3 *, u32);
1295 static int tg3_nvram_lock(struct tg3 *);
1296 static void tg3_nvram_unlock(struct tg3 *);
1297
/* Put the PHY into its lowest-power state before suspending the
 * device.
 *
 * Serdes devices (5704 only) get a SG_DIG/SERDES_CFG sequence; the
 * 5906 EPHY is reset and put into IDDQ mode; other copper PHYs have
 * their LED forced off and are then powered down via BMCR_PDOWN —
 * except on chips where powering down the PHY is known to cause
 * problems (see comment below).
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Reset the EPHY, then drop it into IDDQ low-power
		 * mode.  No BMCR_PDOWN on this chip.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the link LED off before powering down. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1337
1338 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1339 {
1340         u32 misc_host_ctrl;
1341         u16 power_control, power_caps;
1342         int pm = tp->pm_cap;
1343
1344         /* Make sure register accesses (indirect or otherwise)
1345          * will function correctly.
1346          */
1347         pci_write_config_dword(tp->pdev,
1348                                TG3PCI_MISC_HOST_CTRL,
1349                                tp->misc_host_ctrl);
1350
1351         pci_read_config_word(tp->pdev,
1352                              pm + PCI_PM_CTRL,
1353                              &power_control);
1354         power_control |= PCI_PM_CTRL_PME_STATUS;
1355         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1356         switch (state) {
1357         case PCI_D0:
1358                 power_control |= 0;
1359                 pci_write_config_word(tp->pdev,
1360                                       pm + PCI_PM_CTRL,
1361                                       power_control);
1362                 udelay(100);    /* Delay after power state change */
1363
1364                 /* Switch out of Vaux if it is a NIC */
1365                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1366                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1367
1368                 return 0;
1369
1370         case PCI_D1:
1371                 power_control |= 1;
1372                 break;
1373
1374         case PCI_D2:
1375                 power_control |= 2;
1376                 break;
1377
1378         case PCI_D3hot:
1379                 power_control |= 3;
1380                 break;
1381
1382         default:
1383                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1384                        "requested.\n",
1385                        tp->dev->name, state);
1386                 return -EINVAL;
1387         };
1388
1389         power_control |= PCI_PM_CTRL_PME_ENABLE;
1390
1391         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1392         tw32(TG3PCI_MISC_HOST_CTRL,
1393              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1394
1395         if (tp->link_config.phy_is_low_power == 0) {
1396                 tp->link_config.phy_is_low_power = 1;
1397                 tp->link_config.orig_speed = tp->link_config.speed;
1398                 tp->link_config.orig_duplex = tp->link_config.duplex;
1399                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1400         }
1401
1402         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1403                 tp->link_config.speed = SPEED_10;
1404                 tp->link_config.duplex = DUPLEX_HALF;
1405                 tp->link_config.autoneg = AUTONEG_ENABLE;
1406                 tg3_setup_phy(tp, 0);
1407         }
1408
1409         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1410                 u32 val;
1411
1412                 val = tr32(GRC_VCPU_EXT_CTRL);
1413                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1414         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1415                 int i;
1416                 u32 val;
1417
1418                 for (i = 0; i < 200; i++) {
1419                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1420                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1421                                 break;
1422                         msleep(1);
1423                 }
1424         }
1425         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1426                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1427                                                      WOL_DRV_STATE_SHUTDOWN |
1428                                                      WOL_DRV_WOL |
1429                                                      WOL_SET_MAGIC_PKT);
1430
1431         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1432
1433         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1434                 u32 mac_mode;
1435
1436                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1437                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1438                         udelay(40);
1439
1440                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1441                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1442                         else
1443                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1444
1445                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1446                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1447                             ASIC_REV_5700) {
1448                                 u32 speed = (tp->tg3_flags &
1449                                              TG3_FLAG_WOL_SPEED_100MB) ?
1450                                              SPEED_100 : SPEED_10;
1451                                 if (tg3_5700_link_polarity(tp, speed))
1452                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1453                                 else
1454                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1455                         }
1456                 } else {
1457                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1458                 }
1459
1460                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1461                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1462
1463                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1464                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1465                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1466
1467                 tw32_f(MAC_MODE, mac_mode);
1468                 udelay(100);
1469
1470                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1471                 udelay(10);
1472         }
1473
1474         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1475             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1476              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1477                 u32 base_val;
1478
1479                 base_val = tp->pci_clock_ctrl;
1480                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1481                              CLOCK_CTRL_TXCLK_DISABLE);
1482
1483                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1484                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1485         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1486                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1487                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1488                 /* do nothing */
1489         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1490                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1491                 u32 newbits1, newbits2;
1492
1493                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1494                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1495                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1496                                     CLOCK_CTRL_TXCLK_DISABLE |
1497                                     CLOCK_CTRL_ALTCLK);
1498                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1499                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1500                         newbits1 = CLOCK_CTRL_625_CORE;
1501                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1502                 } else {
1503                         newbits1 = CLOCK_CTRL_ALTCLK;
1504                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1505                 }
1506
1507                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1508                             40);
1509
1510                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1511                             40);
1512
1513                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1514                         u32 newbits3;
1515
1516                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1517                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1518                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1519                                             CLOCK_CTRL_TXCLK_DISABLE |
1520                                             CLOCK_CTRL_44MHZ_CORE);
1521                         } else {
1522                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1523                         }
1524
1525                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1526                                     tp->pci_clock_ctrl | newbits3, 40);
1527                 }
1528         }
1529
1530         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1531             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1532             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1533                 tg3_power_down_phy(tp);
1534
1535         tg3_frob_aux_power(tp);
1536
1537         /* Workaround for unstable PLL clock */
1538         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1539             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1540                 u32 val = tr32(0x7d00);
1541
1542                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1543                 tw32(0x7d00, val);
1544                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1545                         int err;
1546
1547                         err = tg3_nvram_lock(tp);
1548                         tg3_halt_cpu(tp, RX_CPU_BASE);
1549                         if (!err)
1550                                 tg3_nvram_unlock(tp);
1551                 }
1552         }
1553
1554         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1555
1556         /* Finally, set the new power state. */
1557         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1558         udelay(100);    /* Delay after power state change */
1559
1560         return 0;
1561 }
1562
1563 static void tg3_link_report(struct tg3 *tp)
1564 {
1565         if (!netif_carrier_ok(tp->dev)) {
1566                 if (netif_msg_link(tp))
1567                         printk(KERN_INFO PFX "%s: Link is down.\n",
1568                                tp->dev->name);
1569         } else if (netif_msg_link(tp)) {
1570                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1571                        tp->dev->name,
1572                        (tp->link_config.active_speed == SPEED_1000 ?
1573                         1000 :
1574                         (tp->link_config.active_speed == SPEED_100 ?
1575                          100 : 10)),
1576                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1577                         "full" : "half"));
1578
1579                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1580                        "%s for RX.\n",
1581                        tp->dev->name,
1582                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1583                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1584         }
1585 }
1586
1587 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1588 {
1589         u32 new_tg3_flags = 0;
1590         u32 old_rx_mode = tp->rx_mode;
1591         u32 old_tx_mode = tp->tx_mode;
1592
1593         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1594
1595                 /* Convert 1000BaseX flow control bits to 1000BaseT
1596                  * bits before resolving flow control.
1597                  */
1598                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1599                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1600                                        ADVERTISE_PAUSE_ASYM);
1601                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1602
1603                         if (local_adv & ADVERTISE_1000XPAUSE)
1604                                 local_adv |= ADVERTISE_PAUSE_CAP;
1605                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1606                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1607                         if (remote_adv & LPA_1000XPAUSE)
1608                                 remote_adv |= LPA_PAUSE_CAP;
1609                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1610                                 remote_adv |= LPA_PAUSE_ASYM;
1611                 }
1612
1613                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1614                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1615                                 if (remote_adv & LPA_PAUSE_CAP)
1616                                         new_tg3_flags |=
1617                                                 (TG3_FLAG_RX_PAUSE |
1618                                                 TG3_FLAG_TX_PAUSE);
1619                                 else if (remote_adv & LPA_PAUSE_ASYM)
1620                                         new_tg3_flags |=
1621                                                 (TG3_FLAG_RX_PAUSE);
1622                         } else {
1623                                 if (remote_adv & LPA_PAUSE_CAP)
1624                                         new_tg3_flags |=
1625                                                 (TG3_FLAG_RX_PAUSE |
1626                                                 TG3_FLAG_TX_PAUSE);
1627                         }
1628                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1629                         if ((remote_adv & LPA_PAUSE_CAP) &&
1630                         (remote_adv & LPA_PAUSE_ASYM))
1631                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1632                 }
1633
1634                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1635                 tp->tg3_flags |= new_tg3_flags;
1636         } else {
1637                 new_tg3_flags = tp->tg3_flags;
1638         }
1639
1640         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1641                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1642         else
1643                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1644
1645         if (old_rx_mode != tp->rx_mode) {
1646                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1647         }
1648
1649         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1650                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1651         else
1652                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1653
1654         if (old_tx_mode != tp->tx_mode) {
1655                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1656         }
1657 }
1658
1659 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1660 {
1661         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1662         case MII_TG3_AUX_STAT_10HALF:
1663                 *speed = SPEED_10;
1664                 *duplex = DUPLEX_HALF;
1665                 break;
1666
1667         case MII_TG3_AUX_STAT_10FULL:
1668                 *speed = SPEED_10;
1669                 *duplex = DUPLEX_FULL;
1670                 break;
1671
1672         case MII_TG3_AUX_STAT_100HALF:
1673                 *speed = SPEED_100;
1674                 *duplex = DUPLEX_HALF;
1675                 break;
1676
1677         case MII_TG3_AUX_STAT_100FULL:
1678                 *speed = SPEED_100;
1679                 *duplex = DUPLEX_FULL;
1680                 break;
1681
1682         case MII_TG3_AUX_STAT_1000HALF:
1683                 *speed = SPEED_1000;
1684                 *duplex = DUPLEX_HALF;
1685                 break;
1686
1687         case MII_TG3_AUX_STAT_1000FULL:
1688                 *speed = SPEED_1000;
1689                 *duplex = DUPLEX_FULL;
1690                 break;
1691
1692         default:
1693                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1695                                  SPEED_10;
1696                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1697                                   DUPLEX_HALF;
1698                         break;
1699                 }
1700                 *speed = SPEED_INVALID;
1701                 *duplex = DUPLEX_INVALID;
1702                 break;
1703         };
1704 }
1705
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config, then either force the requested speed/duplex or
 * (re)start autonegotiation.  Three advertisement cases are handled:
 * entering low-power mode, autoneg with an advertised-modes mask, and
 * a single forced link mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100 Mb advertised if WoL needs to link at 100. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise everything enabled in
		 * tp->link_config.advertising (gigabit is stripped on
		 * 10/100-only devices).
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 chips advertise as master (skipped on
			 * 10/100-only devices).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10 or 100 Mb forced: clear gigabit control and
			 * advertise only the one selected mode.
			 */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only rewrite BMCR if it would actually change.  Put the
		 * PHY into loopback first and poll (up to 1500 x 10us) for
		 * link to drop before applying the new forced mode.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR read twice: link status is a
				 * latched bit in MII.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: (re)start negotiation with the
		 * advertisements programmed above.
		 */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1838
1839 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1840 {
1841         int err;
1842
1843         /* Turn off tap power management. */
1844         /* Set Extended packet length bit */
1845         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1846
1847         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1848         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1849
1850         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1851         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1852
1853         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1854         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1855
1856         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1857         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1858
1859         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1860         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1861
1862         udelay(40);
1863
1864         return err;
1865 }
1866
1867 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1868 {
1869         u32 adv_reg, all_mask = 0;
1870
1871         if (mask & ADVERTISED_10baseT_Half)
1872                 all_mask |= ADVERTISE_10HALF;
1873         if (mask & ADVERTISED_10baseT_Full)
1874                 all_mask |= ADVERTISE_10FULL;
1875         if (mask & ADVERTISED_100baseT_Half)
1876                 all_mask |= ADVERTISE_100HALF;
1877         if (mask & ADVERTISED_100baseT_Full)
1878                 all_mask |= ADVERTISE_100FULL;
1879
1880         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1881                 return 0;
1882
1883         if ((adv_reg & all_mask) != all_mask)
1884                 return 0;
1885         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1886                 u32 tg3_ctrl;
1887
1888                 all_mask = 0;
1889                 if (mask & ADVERTISED_1000baseT_Half)
1890                         all_mask |= ADVERTISE_1000HALF;
1891                 if (mask & ADVERTISED_1000baseT_Full)
1892                         all_mask |= ADVERTISE_1000FULL;
1893
1894                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1895                         return 0;
1896
1897                 if ((tg3_ctrl & all_mask) != all_mask)
1898                         return 0;
1899         }
1900         return 1;
1901 }
1902
/* Bring up / re-check the link on a copper-PHY device: quiesce MAC
 * link attentions, reset/initialize the PHY where chip-specific
 * workarounds require it, read the negotiated speed/duplex, resolve
 * flow control, program MAC_MODE, and report carrier changes.
 * Returns 0, or a PHY error code from the 5401 DSP setup path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any pending link-related attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR read twice: link status is a latched bit. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* 5401 needs its DSP programmed before link can
			 * be trusted; poll up to 1000 x 10us for link.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit without link: retry with a
			 * full PHY reset plus DSP re-init.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only link-change interrupts when using MI interrupts;
	 * otherwise mask everything (except on the 5906).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL shadow 0x4007 is set; if it
		 * was clear, set it and go straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link (up to 100 x 40us). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to 2000 x 10us) for AUX_STAT to go non-zero,
		 * then decode speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR readback (non-zero, not 0x7fff). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if the PHY is
			 * actually in the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* No usable link (or waking from low power): reprogram the PHY
	 * and see whether link came straight back.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program MAC port mode/duplex to match the link result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via the
	 * NIC SRAM mailbox after clearing link attentions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate link state to the net stack and log the change. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2179
/* Per-call state for the software autonegotiation state machine
 * implemented by tg3_fiber_aneg_smachine() for fiber links.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters; their difference is compared against
	 * ANEG_STATE_SETTLE_TIME in the state machine.
	 */
	unsigned long link_time, cur_time;

	/* Last config word seen on the wire, and how many consecutive
	 * times the same word was observed (sets ability_match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Raw config words transmitted / received (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle time in cur_time ticks. */
#define ANEG_STATE_SETTLE_TIME  10000
2243
2244 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2245                                    struct tg3_fiber_aneginfo *ap)
2246 {
2247         unsigned long delta;
2248         u32 rx_cfg_reg;
2249         int ret;
2250
2251         if (ap->state == ANEG_STATE_UNKNOWN) {
2252                 ap->rxconfig = 0;
2253                 ap->link_time = 0;
2254                 ap->cur_time = 0;
2255                 ap->ability_match_cfg = 0;
2256                 ap->ability_match_count = 0;
2257                 ap->ability_match = 0;
2258                 ap->idle_match = 0;
2259                 ap->ack_match = 0;
2260         }
2261         ap->cur_time++;
2262
2263         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2264                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2265
2266                 if (rx_cfg_reg != ap->ability_match_cfg) {
2267                         ap->ability_match_cfg = rx_cfg_reg;
2268                         ap->ability_match = 0;
2269                         ap->ability_match_count = 0;
2270                 } else {
2271                         if (++ap->ability_match_count > 1) {
2272                                 ap->ability_match = 1;
2273                                 ap->ability_match_cfg = rx_cfg_reg;
2274                         }
2275                 }
2276                 if (rx_cfg_reg & ANEG_CFG_ACK)
2277                         ap->ack_match = 1;
2278                 else
2279                         ap->ack_match = 0;
2280
2281                 ap->idle_match = 0;
2282         } else {
2283                 ap->idle_match = 1;
2284                 ap->ability_match_cfg = 0;
2285                 ap->ability_match_count = 0;
2286                 ap->ability_match = 0;
2287                 ap->ack_match = 0;
2288
2289                 rx_cfg_reg = 0;
2290         }
2291
2292         ap->rxconfig = rx_cfg_reg;
2293         ret = ANEG_OK;
2294
2295         switch(ap->state) {
2296         case ANEG_STATE_UNKNOWN:
2297                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2298                         ap->state = ANEG_STATE_AN_ENABLE;
2299
2300                 /* fallthru */
2301         case ANEG_STATE_AN_ENABLE:
2302                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2303                 if (ap->flags & MR_AN_ENABLE) {
2304                         ap->link_time = 0;
2305                         ap->cur_time = 0;
2306                         ap->ability_match_cfg = 0;
2307                         ap->ability_match_count = 0;
2308                         ap->ability_match = 0;
2309                         ap->idle_match = 0;
2310                         ap->ack_match = 0;
2311
2312                         ap->state = ANEG_STATE_RESTART_INIT;
2313                 } else {
2314                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2315                 }
2316                 break;
2317
2318         case ANEG_STATE_RESTART_INIT:
2319                 ap->link_time = ap->cur_time;
2320                 ap->flags &= ~(MR_NP_LOADED);
2321                 ap->txconfig = 0;
2322                 tw32(MAC_TX_AUTO_NEG, 0);
2323                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2324                 tw32_f(MAC_MODE, tp->mac_mode);
2325                 udelay(40);
2326
2327                 ret = ANEG_TIMER_ENAB;
2328                 ap->state = ANEG_STATE_RESTART;
2329
2330                 /* fallthru */
2331         case ANEG_STATE_RESTART:
2332                 delta = ap->cur_time - ap->link_time;
2333                 if (delta > ANEG_STATE_SETTLE_TIME) {
2334                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2335                 } else {
2336                         ret = ANEG_TIMER_ENAB;
2337                 }
2338                 break;
2339
2340         case ANEG_STATE_DISABLE_LINK_OK:
2341                 ret = ANEG_DONE;
2342                 break;
2343
2344         case ANEG_STATE_ABILITY_DETECT_INIT:
2345                 ap->flags &= ~(MR_TOGGLE_TX);
2346                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2347                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2348                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2349                 tw32_f(MAC_MODE, tp->mac_mode);
2350                 udelay(40);
2351
2352                 ap->state = ANEG_STATE_ABILITY_DETECT;
2353                 break;
2354
2355         case ANEG_STATE_ABILITY_DETECT:
2356                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2357                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2358                 }
2359                 break;
2360
2361         case ANEG_STATE_ACK_DETECT_INIT:
2362                 ap->txconfig |= ANEG_CFG_ACK;
2363                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2364                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2365                 tw32_f(MAC_MODE, tp->mac_mode);
2366                 udelay(40);
2367
2368                 ap->state = ANEG_STATE_ACK_DETECT;
2369
2370                 /* fallthru */
2371         case ANEG_STATE_ACK_DETECT:
2372                 if (ap->ack_match != 0) {
2373                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2374                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2375                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2376                         } else {
2377                                 ap->state = ANEG_STATE_AN_ENABLE;
2378                         }
2379                 } else if (ap->ability_match != 0 &&
2380                            ap->rxconfig == 0) {
2381                         ap->state = ANEG_STATE_AN_ENABLE;
2382                 }
2383                 break;
2384
2385         case ANEG_STATE_COMPLETE_ACK_INIT:
2386                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2387                         ret = ANEG_FAILED;
2388                         break;
2389                 }
2390                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2391                                MR_LP_ADV_HALF_DUPLEX |
2392                                MR_LP_ADV_SYM_PAUSE |
2393                                MR_LP_ADV_ASYM_PAUSE |
2394                                MR_LP_ADV_REMOTE_FAULT1 |
2395                                MR_LP_ADV_REMOTE_FAULT2 |
2396                                MR_LP_ADV_NEXT_PAGE |
2397                                MR_TOGGLE_RX |
2398                                MR_NP_RX);
2399                 if (ap->rxconfig & ANEG_CFG_FD)
2400                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2401                 if (ap->rxconfig & ANEG_CFG_HD)
2402                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2403                 if (ap->rxconfig & ANEG_CFG_PS1)
2404                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2405                 if (ap->rxconfig & ANEG_CFG_PS2)
2406                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2407                 if (ap->rxconfig & ANEG_CFG_RF1)
2408                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2409                 if (ap->rxconfig & ANEG_CFG_RF2)
2410                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2411                 if (ap->rxconfig & ANEG_CFG_NP)
2412                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2413
2414                 ap->link_time = ap->cur_time;
2415
2416                 ap->flags ^= (MR_TOGGLE_TX);
2417                 if (ap->rxconfig & 0x0008)
2418                         ap->flags |= MR_TOGGLE_RX;
2419                 if (ap->rxconfig & ANEG_CFG_NP)
2420                         ap->flags |= MR_NP_RX;
2421                 ap->flags |= MR_PAGE_RX;
2422
2423                 ap->state = ANEG_STATE_COMPLETE_ACK;
2424                 ret = ANEG_TIMER_ENAB;
2425                 break;
2426
2427         case ANEG_STATE_COMPLETE_ACK:
2428                 if (ap->ability_match != 0 &&
2429                     ap->rxconfig == 0) {
2430                         ap->state = ANEG_STATE_AN_ENABLE;
2431                         break;
2432                 }
2433                 delta = ap->cur_time - ap->link_time;
2434                 if (delta > ANEG_STATE_SETTLE_TIME) {
2435                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2436                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2437                         } else {
2438                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2439                                     !(ap->flags & MR_NP_RX)) {
2440                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2441                                 } else {
2442                                         ret = ANEG_FAILED;
2443                                 }
2444                         }
2445                 }
2446                 break;
2447
2448         case ANEG_STATE_IDLE_DETECT_INIT:
2449                 ap->link_time = ap->cur_time;
2450                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2451                 tw32_f(MAC_MODE, tp->mac_mode);
2452                 udelay(40);
2453
2454                 ap->state = ANEG_STATE_IDLE_DETECT;
2455                 ret = ANEG_TIMER_ENAB;
2456                 break;
2457
2458         case ANEG_STATE_IDLE_DETECT:
2459                 if (ap->ability_match != 0 &&
2460                     ap->rxconfig == 0) {
2461                         ap->state = ANEG_STATE_AN_ENABLE;
2462                         break;
2463                 }
2464                 delta = ap->cur_time - ap->link_time;
2465                 if (delta > ANEG_STATE_SETTLE_TIME) {
2466                         /* XXX another gem from the Broadcom driver :( */
2467                         ap->state = ANEG_STATE_LINK_OK;
2468                 }
2469                 break;
2470
2471         case ANEG_STATE_LINK_OK:
2472                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2473                 ret = ANEG_DONE;
2474                 break;
2475
2476         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2477                 /* ??? unimplemented */
2478                 break;
2479
2480         case ANEG_STATE_NEXT_PAGE_WAIT:
2481                 /* ??? unimplemented */
2482                 break;
2483
2484         default:
2485                 ret = ANEG_FAILED;
2486                 break;
2487         };
2488
2489         return ret;
2490 }
2491
2492 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2493 {
2494         int res = 0;
2495         struct tg3_fiber_aneginfo aninfo;
2496         int status = ANEG_FAILED;
2497         unsigned int tick;
2498         u32 tmp;
2499
2500         tw32_f(MAC_TX_AUTO_NEG, 0);
2501
2502         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2503         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2504         udelay(40);
2505
2506         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2507         udelay(40);
2508
2509         memset(&aninfo, 0, sizeof(aninfo));
2510         aninfo.flags |= MR_AN_ENABLE;
2511         aninfo.state = ANEG_STATE_UNKNOWN;
2512         aninfo.cur_time = 0;
2513         tick = 0;
2514         while (++tick < 195000) {
2515                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2516                 if (status == ANEG_DONE || status == ANEG_FAILED)
2517                         break;
2518
2519                 udelay(1);
2520         }
2521
2522         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2523         tw32_f(MAC_MODE, tp->mac_mode);
2524         udelay(40);
2525
2526         *flags = aninfo.flags;
2527
2528         if (status == ANEG_DONE &&
2529             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2530                              MR_LP_ADV_FULL_DUPLEX)))
2531                 res = 1;
2532
2533         return res;
2534 }
2535
/* One-time bring-up sequence for the BCM8002 SERDES PHY: set the PLL
 * lock range, software-reset the PHY, program vendor-specified magic
 * register values and pulse POR.  Skipped when the device has already
 * completed init and there is no PCS sync, so a settled PHY is not
 * disturbed.  The numeric register addresses/values below come from
 * the vendor sequence; assume they are opaque and order-sensitive.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (~5ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (~150ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2585
2586 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2587 {
2588         u32 sg_dig_ctrl, sg_dig_status;
2589         u32 serdes_cfg, expected_sg_dig_ctrl;
2590         int workaround, port_a;
2591         int current_link_up;
2592
2593         serdes_cfg = 0;
2594         expected_sg_dig_ctrl = 0;
2595         workaround = 0;
2596         port_a = 1;
2597         current_link_up = 0;
2598
2599         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2600             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2601                 workaround = 1;
2602                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2603                         port_a = 0;
2604
2605                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2606                 /* preserve bits 20-23 for voltage regulator */
2607                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2608         }
2609
2610         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2611
2612         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2613                 if (sg_dig_ctrl & (1 << 31)) {
2614                         if (workaround) {
2615                                 u32 val = serdes_cfg;
2616
2617                                 if (port_a)
2618                                         val |= 0xc010000;
2619                                 else
2620                                         val |= 0x4010000;
2621                                 tw32_f(MAC_SERDES_CFG, val);
2622                         }
2623                         tw32_f(SG_DIG_CTRL, 0x01388400);
2624                 }
2625                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2626                         tg3_setup_flow_control(tp, 0, 0);
2627                         current_link_up = 1;
2628                 }
2629                 goto out;
2630         }
2631
2632         /* Want auto-negotiation.  */
2633         expected_sg_dig_ctrl = 0x81388400;
2634
2635         /* Pause capability */
2636         expected_sg_dig_ctrl |= (1 << 11);
2637
2638         /* Asymettric pause */
2639         expected_sg_dig_ctrl |= (1 << 12);
2640
2641         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2642                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2643                     tp->serdes_counter &&
2644                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2645                                     MAC_STATUS_RCVD_CFG)) ==
2646                      MAC_STATUS_PCS_SYNCED)) {
2647                         tp->serdes_counter--;
2648                         current_link_up = 1;
2649                         goto out;
2650                 }
2651 restart_autoneg:
2652                 if (workaround)
2653                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2654                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2655                 udelay(5);
2656                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2657
2658                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2659                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2660         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2661                                  MAC_STATUS_SIGNAL_DET)) {
2662                 sg_dig_status = tr32(SG_DIG_STATUS);
2663                 mac_status = tr32(MAC_STATUS);
2664
2665                 if ((sg_dig_status & (1 << 1)) &&
2666                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2667                         u32 local_adv, remote_adv;
2668
2669                         local_adv = ADVERTISE_PAUSE_CAP;
2670                         remote_adv = 0;
2671                         if (sg_dig_status & (1 << 19))
2672                                 remote_adv |= LPA_PAUSE_CAP;
2673                         if (sg_dig_status & (1 << 20))
2674                                 remote_adv |= LPA_PAUSE_ASYM;
2675
2676                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2677                         current_link_up = 1;
2678                         tp->serdes_counter = 0;
2679                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2680                 } else if (!(sg_dig_status & (1 << 1))) {
2681                         if (tp->serdes_counter)
2682                                 tp->serdes_counter--;
2683                         else {
2684                                 if (workaround) {
2685                                         u32 val = serdes_cfg;
2686
2687                                         if (port_a)
2688                                                 val |= 0xc010000;
2689                                         else
2690                                                 val |= 0x4010000;
2691
2692                                         tw32_f(MAC_SERDES_CFG, val);
2693                                 }
2694
2695                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2696                                 udelay(40);
2697
2698                                 /* Link parallel detection - link is up */
2699                                 /* only if we have PCS_SYNC and not */
2700                                 /* receiving config code words */
2701                                 mac_status = tr32(MAC_STATUS);
2702                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2703                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2704                                         tg3_setup_flow_control(tp, 0, 0);
2705                                         current_link_up = 1;
2706                                         tp->tg3_flags2 |=
2707                                                 TG3_FLG2_PARALLEL_DETECT;
2708                                         tp->serdes_counter =
2709                                                 SERDES_PARALLEL_DET_TIMEOUT;
2710                                 } else
2711                                         goto restart_autoneg;
2712                         }
2713                 }
2714         } else {
2715                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2716                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2717         }
2718
2719 out:
2720         return current_link_up;
2721 }
2722
/* Link setup for SERDES parts without the SG_DIG hardware autoneg
 * engine: run the software 802.3z autoneg state machine via
 * fiber_autoneg(), or force 1000FD when autoneg is disabled.
 *
 * @tp:         device state
 * @mac_status: recent MAC_STATUS snapshot supplied by the caller
 *
 * Returns 1 when the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no usable signal; link stays down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Local side claims symmetric pause only; map the
			 * partner's MR_* flags to LPA_* bits for the flow
			 * control resolver.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the sync/config change bits until they stay clear
		 * (bounded to 30 attempts) so MAC_STATUS can be trusted.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg did not complete, but PCS is synced and the
		 * peer is not sending config words: accept the link.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS once to complete the forced
		 * bring-up sequence.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2779
/* Top-level link setup for TBI (fiber) ports.  (Re)negotiates the
 * link via the hardware or software autoneg path, then updates
 * link_config, the link LED and the netdev carrier state, reporting
 * a link change when anything observable changed.  Always returns 0.
 *
 * Note: force_reset is unused here; it exists so the signature
 * matches the other tg3_setup_*_phy() variants.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot pause/speed/duplex so we can detect changes later. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg device that is initialized, has
	 * carrier, and shows a clean synced link with no pending config
	 * words — just ack the change bits and keep the current link.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	/* Stop advertising and put the MAC into TBI port mode. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	/* Dispatch to the hardware (SG_DIG) or software autoneg path. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the change bits until they stay clear (up to 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: kick the peer by
		 * pulsing SEND_CONFIGS once.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links are recorded as 1000/full only. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report when carrier flipped, or when the link stayed in the
	 * same state but pause/speed/duplex changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2891
/* Link setup for fiber ports driven through an MII-accessible PHY
 * (e.g. 5714S-class SERDES).  Programs 1000BASE-X advertisement or
 * forced duplex via MII registers, reads back BMSR/BMCR, resolves
 * speed/duplex/flow-control, and updates MAC mode and carrier state.
 *
 * @tp:          device state
 * @force_reset: when non-zero, reset the PHY before configuring it
 *
 * Returns the OR of the tg3_readphy()/tg3_writephy() error results
 * accumulated along the way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending link-related status bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for the current
	 * state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from scratch. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Only restart autoneg if the advertisement actually
		 * changed or autoneg was off; otherwise keep the link.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, set requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart
				 * autoneg so the peer drops the link
				 * before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR again: read twice. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex/pause from the common subset of
			 * local and partner abilities; no common HD/FD
			 * ability means autoneg hasn't really completed.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	/* NOTE(review): this tests the PREVIOUS active_duplex, not the
	 * freshly computed current_duplex (stored into link_config only
	 * below) — confirm whether that one-call-stale value is
	 * intentional.
	 */
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate the result to the carrier state and report. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3058
/* Bring up (or tear down) a SERDES link via "parallel detection".
 * Called repeatedly; tp->serdes_counter first gives autoneg time to
 * complete.  If autoneg is enabled but the link never came up, and we
 * see signal detect without incoming config code words, the peer is
 * not autonegotiating, so force 1000/full.  Conversely, if we forced
 * the link this way and config code words later appear, re-enable
 * autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read 0x15 twice -- NOTE(review): presumably the
			 * first read clears latched status so the second
			 * reflects current state; confirm against PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Disable autoneg and force 1000/full, then
				 * remember we did so via the flag below.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3116
/* (Re)configure the link: dispatch to the flavor-specific PHY setup
 * routine, then update the MAC-side settings that depend on the
 * resulting link state (TX slot time, statistics coalescing, PCIe L1
 * power-management threshold).  Returns the error code from the PHY
 * setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	/* Three mutually exclusive PHY flavors. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half needs the long (0xff) slot time; everything else
	 * uses the default 32.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: only collect hardware statistics while the
	 * link is up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: loosen the L1 entry threshold while the link
	 * is down, tighten it (mask set) while it is up.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3162
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If we are already in write-reorder mode (or already using the
	 * indirect mailbox path), this condition should be impossible.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; the reset task performs the actual
	 * chip reset and switches the mailbox write method.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3183
3184 static inline u32 tg3_tx_avail(struct tg3 *tp)
3185 {
3186         smp_mb();
3187         return (tp->tx_pending -
3188                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3189 }
3190
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim TX descriptors completed by the chip: unmap each skb's head
 * and fragments, free the skb, advance tx_cons, and wake the queue if
 * enough space opened up.  A corrupted ring (NULL skb where one is
 * expected, or occupied/overrun fragment slots) triggers recovery.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* The head slot of each packet must hold the skb. */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Fragment slots carry only mappings; a non-NULL skb
		 * there, or running into hw_idx mid-packet, means the
		 * completion index is bogus.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-check under netif_tx_lock to avoid racing with
	 * tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3258
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	/* Select the ring (standard vs jumbo) named by opaque_key. */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the mapping result is not checked with
	 * pci_dma_mapping_error(); on platforms where DMA mapping can
	 * fail the chip would be handed a bad bus address -- verify.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words need updating; see header comment. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3330
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Moves the skb and its DMA mapping from the src_idx slot to the
 * dest slot of the same ring, so the buffer is re-posted to the chip
 * without a fresh allocation.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		/* Unknown ring key: nothing to recycle. */
		return;
	};

	/* Transfer skb ownership, the unmap cookie, and the bus address
	 * words, then clear the source slot.
	 */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
3371
#if TG3_VLAN_TAG_USED
/* Hand a received skb up the stack with VLAN acceleration, tagging it
 * with vlan_tag against the device's VLAN group.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3378
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* NAPI RX worker: consume up to 'budget' entries from the status ring,
 * deliver packets to the stack, and re-post buffers to the std/jumbo
 * rings.  Returns the number of packets received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which posting ring (and
		 * which slot in it) this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unrecognized ring key; skip without posting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (except the benign odd-nibble MII
		 * indication) and recycle their buffers.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the existing buffer up and
			 * post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a new skb and recycle
			 * the original ring buffer.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* Reserve 2 bytes so the IP header is aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when enabled and the
		 * chip reports the full-complement value 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted std
		 * buffers so it does not run dry mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return received;
}
3558
/* One round of servicing from the NAPI poll loop: handle link-change
 * events, reclaim the TX ring, then run RX within the remaining
 * budget.  Returns the updated work_done count; if TX reclaim flagged
 * TG3_FLAG_TX_RECOVERY_PENDING we return early so tg3_poll() can
 * schedule the reset task.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit, keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3592
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted (stay scheduled), no work remains (complete and re-enable
 * interrupts), or TX recovery is pending (complete and kick the reset
 * task).  Returns the amount of work done, per the NAPI contract.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		/* No more work: leave polling mode and unmask IRQs. */
		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3633
/* Quiesce interrupt processing: set irq_sync so the IRQ handlers bail
 * out early (they test tg3_irq_sync()), then wait for any handler that
 * is already running to finish.  The smp_mb() orders the flag store
 * before the synchronize.  Undone by tg3_restart_hw()/open paths that
 * clear irq_sync.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Must not already be quiesced. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3643
/* Non-zero while interrupts are quiesced via tg3_irq_quiesce();
 * IRQ handlers use this to skip scheduling NAPI during shutdown.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3648
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3660
/* Release the lock taken by tg3_full_lock().  Note this does not
 * clear irq_sync; the restart paths handle that explicitly.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3665
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 *
 * Just prefetch the hot status/RX data and schedule NAPI, unless the
 * driver is quiescing interrupts.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3682
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 *
 * Masks further chip interrupts via the mailbox, then schedules NAPI.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3707
/* Legacy (possibly shared) INTx handler for chips without tagged
 * status.  Confirms the interrupt is ours, masks chip IRQs via the
 * mailbox, and schedules NAPI if there is work; otherwise re-enables
 * interrupts (shared-line case).
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3756
/* INTx handler for chips using tagged status blocks: ours-ness is
 * decided by comparing the status tag against the last one we acked,
 * rather than the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3804
/* ISR for interrupt test */
/* Minimal handler used by the self-test to verify that the device can
 * raise an interrupt: if the status block updated or INTA is asserted,
 * disable interrupts and report handled.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3819
3820 static int tg3_init_hw(struct tg3 *, int);
3821 static int tg3_halt(struct tg3 *, int, int);
3822
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the chip is halted and the netdev
 * is closed; the caller still gets tp->lock back held either way.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() is called without tp->lock held (it can
		 * sleep and re-enters driver paths that take the lock),
		 * so drop the lock, quiesce timer/irq/NAPI state, close,
		 * then re-acquire for the caller.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3844
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so facilities
 * like netconsole can make progress with normal interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3853
/* Process-context worker (tp->reset_task): halt and fully re-initialize
 * the chip after a TX timeout or TX recovery event.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The interface may have been brought down since the work was
	 * scheduled; nothing to do in that case.
	 */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop the queues/NAPI outside the lock, then re-acquire with
	 * irq_sync (second argument 1) for the actual reset.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the one-shot "kick the timer after restart" flag. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery: fall back to flushed (non-reordered)
		 * mailbox write methods from here on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3894
/* Dump a minimal set of MAC/DMA status registers to the log; used for
 * post-mortem on transmit timeouts.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3902
/* netdev watchdog callback: log state (if tx_err messages are enabled)
 * and schedule a full chip reset in process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* The reset itself must sleep, so defer to tg3_reset_task. */
	schedule_work(&tp->reset_task);
}
3915
3916 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3917 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3918 {
3919         u32 base = (u32) mapping & 0xffffffff;
3920
3921         return ((base > 0xffffdcc0) &&
3922                 (base + len + 8 < base));
3923 }
3924
3925 /* Test for DMA addresses > 40-bit */
3926 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3927                                           int len)
3928 {
3929 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3930         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3931                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3932         return 0;
3933 #else
3934         return 0;
3935 #endif
3936 }
3937
3938 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3939
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a freshly copied skb, re-post it at *start, and
 * unmap/clear the original descriptors between *start and
 * @last_plus_one.  On success *start is advanced past the new
 * descriptor.  Returns 0 on success, -1 if the copy or its mapping was
 * unusable (the packet is then silently dropped).  The original skb is
 * always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 holds the linear head; entries 1..n map
		 * frags[0..n-1], hence the i-1 indexing below.
		 */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot takes ownership of the new skb
			 * (NULL if the workaround failed).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3997
3998 static void tg3_set_txd(struct tg3 *tp, int entry,
3999                         dma_addr_t mapping, int len, u32 flags,
4000                         u32 mss_and_is_end)
4001 {
4002         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4003         int is_end = (mss_and_is_end & 0x1);
4004         u32 mss = (mss_and_is_end >> 1);
4005         u32 vlan_tag = 0;
4006
4007         if (is_end)
4008                 flags |= TXD_FLAG_END;
4009         if (flags & TXD_FLAG_VLAN) {
4010                 vlan_tag = flags >> 16;
4011                 flags &= 0xffff;
4012         }
4013         vlan_tag |= (mss << TXD_MSS_SHIFT);
4014
4015         txd->addr_hi = ((u64) mapping >> 32);
4016         txd->addr_lo = ((u64) mapping & 0xffffffff);
4017         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4018         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4019 }
4020
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Returns NETDEV_TX_OK (the skb is consumed) or NETDEV_TX_BUSY when the
 * ring is unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* We will modify headers below; get a private copy if
		 * the header area is shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* The header length is encoded into the upper bits of
		 * the mss value handed to the hardware (<< 9).
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Hardware recomputes these per segment; prime
			 * tot_len with the per-segment length.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the head entry owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop when a maximally fragmented skb might not fit;
		 * re-check to close the race with TX reclaim.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4139
4140 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4141
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments @skb and transmits each segment individually via
 * tg3_start_xmit_dma_bug().  The original skb is always freed unless
 * we bail out with NETDEV_TX_BUSY.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping to close the race with TX
		 * reclaim; if still short, ask the stack to requeue.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so the result is plain skbs. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Unlink and transmit each segment on its own. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4174
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit(), but tracks whether any mapping trips the 4GB /
 * 40-bit DMA errata and, if so, falls back to a bounce-copy workaround.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* We modify headers below; unshare them first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO erratum on some
		 * chips; punt to software GSO in that case.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants a pseudo-header checksum seed. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* IP/TCP option lengths are reported to the chip in
		 * different fields depending on the TSO flavor.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4348
4349 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4350                                int new_mtu)
4351 {
4352         dev->mtu = new_mtu;
4353
4354         if (new_mtu > ETH_DATA_LEN) {
4355                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4356                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4357                         ethtool_op_set_tso(dev, 0);
4358                 }
4359                 else
4360                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4361         } else {
4362                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4363                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4364                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4365         }
4366 }
4367
/* ndo change_mtu handler: validate the new MTU and, if the interface is
 * up, halt and restart the chip with the new buffer configuration.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Full shutdown: ring sizes depend on the MTU. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4401
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each skb occupies one head slot plus one slot per
	 * fragment, so the index advances inside the loop body.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Fragment slots may wrap past the ring end, hence the
		 * index mask below.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4473
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when not even one RX buffer could be
 * allocated (rx_pending / rx_jumbo_pending are shrunk on partial
 * allocation failure).
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers on the standard ring
	 * when running with a jumbo MTU (they have no jumbo ring).
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* NOTE(review): 64 bytes are held back from the usable
		 * length here and below -- presumably headroom/guard
		 * space; confirm against the chip documentation.
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4563
4564 /*
4565  * Must not be invoked with interrupt sources disabled and
4566  * the hardware shutdown down.
4567  */
4568 static void tg3_free_consistent(struct tg3 *tp)
4569 {
4570         kfree(tp->rx_std_buffers);
4571         tp->rx_std_buffers = NULL;
4572         if (tp->rx_std) {
4573                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4574                                     tp->rx_std, tp->rx_std_mapping);
4575                 tp->rx_std = NULL;
4576         }
4577         if (tp->rx_jumbo) {
4578                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4579                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4580                 tp->rx_jumbo = NULL;
4581         }
4582         if (tp->rx_rcb) {
4583                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4584                                     tp->rx_rcb, tp->rx_rcb_mapping);
4585                 tp->rx_rcb = NULL;
4586         }
4587         if (tp->tx_ring) {
4588                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4589                         tp->tx_ring, tp->tx_desc_mapping);
4590                 tp->tx_ring = NULL;
4591         }
4592         if (tp->hw_status) {
4593                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4594                                     tp->hw_status, tp->status_mapping);
4595                 tp->hw_status = NULL;
4596         }
4597         if (tp->hw_stats) {
4598                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4599                                     tp->hw_stats, tp->stats_mapping);
4600                 tp->hw_stats = NULL;
4601         }
4602 }
4603
4604 /*
4605  * Must not be invoked with interrupt sources disabled and
4606  * the hardware shutdown down.  Can sleep.
4607  */
4608 static int tg3_alloc_consistent(struct tg3 *tp)
4609 {
4610         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4611                                       (TG3_RX_RING_SIZE +
4612                                        TG3_RX_JUMBO_RING_SIZE)) +
4613                                      (sizeof(struct tx_ring_info) *
4614                                       TG3_TX_RING_SIZE),
4615                                      GFP_KERNEL);
4616         if (!tp->rx_std_buffers)
4617                 return -ENOMEM;
4618
4619         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4620         tp->tx_buffers = (struct tx_ring_info *)
4621                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4622
4623         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4624                                           &tp->rx_std_mapping);
4625         if (!tp->rx_std)
4626                 goto err_out;
4627
4628         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4629                                             &tp->rx_jumbo_mapping);
4630
4631         if (!tp->rx_jumbo)
4632                 goto err_out;
4633
4634         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4635                                           &tp->rx_rcb_mapping);
4636         if (!tp->rx_rcb)
4637                 goto err_out;
4638
4639         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4640                                            &tp->tx_desc_mapping);
4641         if (!tp->tx_ring)
4642                 goto err_out;
4643
4644         tp->hw_status = pci_alloc_consistent(tp->pdev,
4645                                              TG3_HW_STATUS_SIZE,
4646                                              &tp->status_mapping);
4647         if (!tp->hw_status)
4648                 goto err_out;
4649
4650         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4651                                             sizeof(struct tg3_hw_stats),
4652                                             &tp->stats_mapping);
4653         if (!tp->hw_stats)
4654                 goto err_out;
4655
4656         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4657         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4658
4659         return 0;
4660
4661 err_out:
4662         tg3_free_consistent(tp);
4663         return -ENOMEM;
4664 }
4665
4666 #define MAX_WAIT_CNT 1000
4667
4668 /* To stop a block, clear the enable bit and poll till it
4669  * clears.  tp->lock is held.
4670  */
4671 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4672 {
4673         unsigned int i;
4674         u32 val;
4675
4676         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4677                 switch (ofs) {
4678                 case RCVLSC_MODE:
4679                 case DMAC_MODE:
4680                 case MBFREE_MODE:
4681                 case BUFMGR_MODE:
4682                 case MEMARB_MODE:
4683                         /* We can't enable/disable these bits of the
4684                          * 5705/5750, just say success.
4685                          */
4686                         return 0;
4687
4688                 default:
4689                         break;
4690                 };
4691         }
4692
4693         val = tr32(ofs);
4694         val &= ~enable_bit;
4695         tw32_f(ofs, val);
4696
4697         for (i = 0; i < MAX_WAIT_CNT; i++) {
4698                 udelay(100);
4699                 val = tr32(ofs);
4700                 if ((val & enable_bit) == 0)
4701                         break;
4702         }
4703
4704         if (i == MAX_WAIT_CNT && !silent) {
4705                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4706                        "ofs=%lx enable_bit=%x\n",
4707                        ofs, enable_bit);
4708                 return -ENODEV;
4709         }
4710
4711         return 0;
4712 }
4713
/* Quiesce the chip before a reset: stop the receive path, the transmit
 * path, host coalescing and the buffer manager / memory arbiter, each
 * via tg3_stop_block(), then clear the host status and statistics
 * blocks.  Errors from the individual stop operations are OR-ed into a
 * single return value; with @silent set, timeouts are not logged.
 *
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Shut off the MAC receiver first so no new frames enter. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Stop the receive-side processing blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Stop the send-side and DMA blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* Wait up to MAX_WAIT_CNT * 100us for the MAC transmitter to
         * report idle.
         */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the flow-through queue reset. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear host-visible status/stats so stale data is not read
         * after the chip comes back.
         */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4776
4777 /* tp->lock is held. */
4778 static int tg3_nvram_lock(struct tg3 *tp)
4779 {
4780         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4781                 int i;
4782
4783                 if (tp->nvram_lock_cnt == 0) {
4784                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4785                         for (i = 0; i < 8000; i++) {
4786                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4787                                         break;
4788                                 udelay(20);
4789                         }
4790                         if (i == 8000) {
4791                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4792                                 return -ENODEV;
4793                         }
4794                 }
4795                 tp->nvram_lock_cnt++;
4796         }
4797         return 0;
4798 }
4799
4800 /* tp->lock is held. */
4801 static void tg3_nvram_unlock(struct tg3 *tp)
4802 {
4803         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4804                 if (tp->nvram_lock_cnt > 0)
4805                         tp->nvram_lock_cnt--;
4806                 if (tp->nvram_lock_cnt == 0)
4807                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4808         }
4809 }
4810
4811 /* tp->lock is held. */
4812 static void tg3_enable_nvram_access(struct tg3 *tp)
4813 {
4814         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4815             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4816                 u32 nvaccess = tr32(NVRAM_ACCESS);
4817
4818                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4819         }
4820 }
4821
4822 /* tp->lock is held. */
4823 static void tg3_disable_nvram_access(struct tg3 *tp)
4824 {
4825         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4826             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4827                 u32 nvaccess = tr32(NVRAM_ACCESS);
4828
4829                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4830         }
4831 }
4832
/* Post an event to the APE management processor.
 *
 * Silently returns unless the APE shared-memory segment signature and
 * firmware-ready status check out.  Retries for up to ~1ms (10 x 100us)
 * waiting for any previous event to be consumed; once the event word is
 * written, rings the APE doorbell (TG3_APE_EVENT).
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (apedata != APE_FW_STATUS_READY)
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Only write the new event while no previous event is
                 * still pending; the write happens under the APE memory
                 * lock.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Ring the doorbell only if the event word was actually written. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4868
4869 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4870 {
4871         u32 event;
4872         u32 apedata;
4873
4874         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4875                 return;
4876
4877         switch (kind) {
4878                 case RESET_KIND_INIT:
4879                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4880                                         APE_HOST_SEG_SIG_MAGIC);
4881                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4882                                         APE_HOST_SEG_LEN_MAGIC);
4883                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4884                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4885                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4886                                         APE_HOST_DRIVER_ID_MAGIC);
4887                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4888                                         APE_HOST_BEHAV_NO_PHYLOCK);
4889
4890                         event = APE_EVENT_STATUS_STATE_START;
4891                         break;
4892                 case RESET_KIND_SHUTDOWN:
4893                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4894                         break;
4895                 case RESET_KIND_SUSPEND:
4896                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4897                         break;
4898                 default:
4899                         return;
4900         }
4901
4902         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4903
4904         tg3_ape_send_event(tp, event);
4905 }
4906
4907 /* tp->lock is held. */
4908 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4909 {
4910         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4911                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4912
4913         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4914                 switch (kind) {
4915                 case RESET_KIND_INIT:
4916                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4917                                       DRV_STATE_START);
4918                         break;
4919
4920                 case RESET_KIND_SHUTDOWN:
4921                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4922                                       DRV_STATE_UNLOAD);
4923                         break;
4924
4925                 case RESET_KIND_SUSPEND:
4926                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4927                                       DRV_STATE_SUSPEND);
4928                         break;
4929
4930                 default:
4931                         break;
4932                 };
4933         }
4934
4935         if (kind == RESET_KIND_INIT ||
4936             kind == RESET_KIND_SUSPEND)
4937                 tg3_ape_driver_state_change(tp, kind);
4938 }
4939
4940 /* tp->lock is held. */
4941 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4942 {
4943         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4944                 switch (kind) {
4945                 case RESET_KIND_INIT:
4946                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947                                       DRV_STATE_START_DONE);
4948                         break;
4949
4950                 case RESET_KIND_SHUTDOWN:
4951                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4952                                       DRV_STATE_UNLOAD_DONE);
4953                         break;
4954
4955                 default:
4956                         break;
4957                 };
4958         }
4959
4960         if (kind == RESET_KIND_SHUTDOWN)
4961                 tg3_ape_driver_state_change(tp, kind);
4962 }
4963
4964 /* tp->lock is held. */
4965 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4966 {
4967         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4968                 switch (kind) {
4969                 case RESET_KIND_INIT:
4970                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971                                       DRV_STATE_START);
4972                         break;
4973
4974                 case RESET_KIND_SHUTDOWN:
4975                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4976                                       DRV_STATE_UNLOAD);
4977                         break;
4978
4979                 case RESET_KIND_SUSPEND:
4980                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4981                                       DRV_STATE_SUSPEND);
4982                         break;
4983
4984                 default:
4985                         break;
4986                 };
4987         }
4988 }
4989
/* Wait for the on-chip firmware to finish initializing after a reset.
 *
 * 5906: polls VCPU_STATUS init-done for up to 20ms (200 x 100us) and
 * returns -ENODEV on timeout.  All other chips: polls the firmware
 * mailbox for up to ~1s (100000 x 10us) for the boot-complete magic;
 * a timeout there is NOT an error (some parts ship without firmware)
 * and is reported once via printk.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 &&
            !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
                tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

                printk(KERN_INFO PFX "%s: No firmware running.\n",
                       tp->dev->name);
        }

        return 0;
}
5028
/* Save the PCI command register before chip reset.
 *
 * The GRC_MISC_CFG core-clock reset can clear the memory enable bit in
 * this register on some chips (see tg3_chip_reset); the saved copy is
 * written back by tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5034
/* Restore PCI configuration state after a chip reset: indirect access,
 * retry/APE policy in TG3PCI_PCISTATE, the saved PCI command register,
 * cacheline/latency (non-PCIe only), PCI-X relaxed ordering, and MSI
 * enable on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Write back the command register saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }
        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5093
5094 static void tg3_stop_fw(struct tg3 *);
5095
/* Perform a GRC core-clock reset of the chip and bring basic register
 * access back up.
 *
 * Saves the relevant PCI config state, quiesces the IRQ path, issues
 * the reset through GRC_MISC_CFG, restores PCI state, waits for the
 * firmware to report boot completion (tg3_poll_fw) and re-probes the
 * ASF enable state from NIC SRAM.
 *
 * tp->lock is held.  Returns 0 on success or the tg3_poll_fw() error.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* Undocumented PCIe workaround registers below (0x7e2c,
                 * bit 29 of GRC_MISC_CFG) — exact semantics not visible
                 * here; preserved from vendor-derived code.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: flag a driver reset to the VCPU and un-halt it. */
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        /* Re-enable the memory arbiter, preserving its mode bits on
         * 5780-class chips.
         */
        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                /* 5705 A0 workaround on register 0xc4 — semantics not
                 * visible here.
                 */
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Reprogram the MAC port mode for serdes PHYs. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5284
/* Ask the ASF firmware to pause: write FWCMD_NICDRV_PAUSE_FW to the
 * firmware command mailbox, raise the RX CPU event (bit 14), and wait
 * up to 100us for the RX CPU to acknowledge by clearing that bit.
 * No-op when ASF is disabled or the APE is managing the device.
 *
 * tp->lock is held.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
           !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                u32 val;
                int i;

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
                val = tr32(GRC_RX_CPU_EVENT);
                val |= (1 << 14);
                tw32(GRC_RX_CPU_EVENT, val);

                /* Wait for RX cpu to ACK the event.  */
                for (i = 0; i < 100; i++) {
                        if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
                                break;
                        udelay(1);
                }
        }
}
5306
/* Stop the firmware, quiesce the hardware and reset the chip, writing
 * the pre/post reset firmware signatures around the reset.
 * tp->lock is held.
 *
 * Returns the tg3_chip_reset() result; the signature writes happen
 * regardless of the reset outcome.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);
        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
5327
5328 #define TG3_FW_RELEASE_MAJOR    0x0
5329 #define TG3_FW_RELASE_MINOR     0x0
5330 #define TG3_FW_RELEASE_FIX      0x0
5331 #define TG3_FW_START_ADDR       0x08000000
5332 #define TG3_FW_TEXT_ADDR        0x08000000
5333 #define TG3_FW_TEXT_LEN         0x9c0
5334 #define TG3_FW_RODATA_ADDR      0x080009c0
5335 #define TG3_FW_RODATA_LEN       0x60
5336 #define TG3_FW_DATA_ADDR        0x08000a40
5337 #define TG3_FW_DATA_LEN         0x20
5338 #define TG3_FW_SBSS_ADDR        0x08000a60
5339 #define TG3_FW_SBSS_LEN         0xc
5340 #define TG3_FW_BSS_ADDR         0x08000a70
5341 #define TG3_FW_BSS_LEN          0x10
5342
5343 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5344         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5345         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5346         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5347         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5348         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5349         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5350         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5351         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5352         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5353         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5354         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5355         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5356         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5357         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5358         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5359         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5360         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5361         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5362         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5363         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5364         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5365         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5366         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5367         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5368         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5369         0, 0, 0, 0, 0, 0,
5370         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5371         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5372         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5373         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5374         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5375         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5376         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5377         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5378         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5379         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5380         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5381         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5382         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5383         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5384         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5385         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5386         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5387         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5388         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5389         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5390         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5391         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5392         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5393         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5394         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5395         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5396         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5397         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5398         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5399         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5400         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5401         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5402         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5403         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5404         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5405         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5406         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5407         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5408         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5409         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5410         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5411         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5412         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5413         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5414         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5415         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5416         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5417         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5418         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5419         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5420         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5421         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5422         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5423         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5424         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5425         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5426         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5427         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5428         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5429         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5430         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5431         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5432         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5433         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5434         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5435 };
5436
/* .rodata section of the 5701_a0 fixup firmware image (loaded by
 * tg3_load_5701_a0_firmware_fix).  The words appear to encode the
 * firmware's ASCII message strings — TODO confirm against the
 * original firmware build.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5444
#if 0 /* All zeros, don't eat up space with it. */
/* The .data section is all zeros; tg3_load_firmware_cpu writes zeros
 * itself when fw_info.data_data is NULL, so this array is compiled out.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5451
/* On-chip scratch memory windows into which the RX and TX CPU
 * firmware images are downloaded (see tg3_load_firmware_cpu).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5456
5457 /* tp->lock is held. */
5458 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5459 {
5460         int i;
5461
5462         BUG_ON(offset == TX_CPU_BASE &&
5463             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5464
5465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5466                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5467
5468                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5469                 return 0;
5470         }
5471         if (offset == RX_CPU_BASE) {
5472                 for (i = 0; i < 10000; i++) {
5473                         tw32(offset + CPU_STATE, 0xffffffff);
5474                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5475                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5476                                 break;
5477                 }
5478
5479                 tw32(offset + CPU_STATE, 0xffffffff);
5480                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5481                 udelay(10);
5482         } else {
5483                 for (i = 0; i < 10000; i++) {
5484                         tw32(offset + CPU_STATE, 0xffffffff);
5485                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5486                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5487                                 break;
5488                 }
5489         }
5490
5491         if (i >= 10000) {
5492                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5493                        "and %s CPU\n",
5494                        tp->dev->name,
5495                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5496                 return -ENODEV;
5497         }
5498
5499         /* Clear firmware's nvram arbitration. */
5500         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5501                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5502         return 0;
5503 }
5504
/* Describes one firmware image to be copied into CPU scratch memory:
 * the .text, .rodata and .data sections, each with its link-time base
 * address, length in bytes, and contents.  A NULL contents pointer
 * makes tg3_load_firmware_cpu write zeros for that section.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5516
5517 /* tp->lock is held. */
5518 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5519                                  int cpu_scratch_size, struct fw_info *info)
5520 {
5521         int err, lock_err, i;
5522         void (*write_op)(struct tg3 *, u32, u32);
5523
5524         if (cpu_base == TX_CPU_BASE &&
5525             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5526                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5527                        "TX cpu firmware on %s which is 5705.\n",
5528                        tp->dev->name);
5529                 return -EINVAL;
5530         }
5531
5532         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5533                 write_op = tg3_write_mem;
5534         else
5535                 write_op = tg3_write_indirect_reg32;
5536
5537         /* It is possible that bootcode is still loading at this point.
5538          * Get the nvram lock first before halting the cpu.
5539          */
5540         lock_err = tg3_nvram_lock(tp);
5541         err = tg3_halt_cpu(tp, cpu_base);
5542         if (!lock_err)
5543                 tg3_nvram_unlock(tp);
5544         if (err)
5545                 goto out;
5546
5547         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5548                 write_op(tp, cpu_scratch_base + i, 0);
5549         tw32(cpu_base + CPU_STATE, 0xffffffff);
5550         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5551         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5552                 write_op(tp, (cpu_scratch_base +
5553                               (info->text_base & 0xffff) +
5554                               (i * sizeof(u32))),
5555                          (info->text_data ?
5556                           info->text_data[i] : 0));
5557         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5558                 write_op(tp, (cpu_scratch_base +
5559                               (info->rodata_base & 0xffff) +
5560                               (i * sizeof(u32))),
5561                          (info->rodata_data ?
5562                           info->rodata_data[i] : 0));
5563         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5564                 write_op(tp, (cpu_scratch_base +
5565                               (info->data_base & 0xffff) +
5566                               (i * sizeof(u32))),
5567                          (info->data_data ?
5568                           info->data_data[i] : 0));
5569
5570         err = 0;
5571
5572 out:
5573         return err;
5574 }
5575
5576 /* tp->lock is held. */
5577 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5578 {
5579         struct fw_info info;
5580         int err, i;
5581
5582         info.text_base = TG3_FW_TEXT_ADDR;
5583         info.text_len = TG3_FW_TEXT_LEN;
5584         info.text_data = &tg3FwText[0];
5585         info.rodata_base = TG3_FW_RODATA_ADDR;
5586         info.rodata_len = TG3_FW_RODATA_LEN;
5587         info.rodata_data = &tg3FwRodata[0];
5588         info.data_base = TG3_FW_DATA_ADDR;
5589         info.data_len = TG3_FW_DATA_LEN;
5590         info.data_data = NULL;
5591
5592         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5593                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5594                                     &info);
5595         if (err)
5596                 return err;
5597
5598         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5599                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5600                                     &info);
5601         if (err)
5602                 return err;
5603
5604         /* Now startup only the RX cpu. */
5605         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5606         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5607
5608         for (i = 0; i < 5; i++) {
5609                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5610                         break;
5611                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5612                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5613                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5614                 udelay(1000);
5615         }
5616         if (i >= 5) {
5617                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5618                        "to set RX CPU PC, is %08x should be %08x\n",
5619                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5620                        TG3_FW_TEXT_ADDR);
5621                 return -ENODEV;
5622         }
5623         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5624         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5625
5626         return 0;
5627 }
5628
5629
/* TSO firmware image: version and section layout (link addresses and
 * byte lengths for .text/.rodata/.data/.sbss/.bss).
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR looks like a typo
 * for "RELEASE"; the name is kept as-is since other code may reference
 * it — confirm before renaming.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5644
5645 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5646         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5647         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5648         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5649         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5650         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5651         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5652         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5653         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5654         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5655         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5656         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5657         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5658         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5659         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5660         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5661         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5662         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5663         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5664         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5665         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5666         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5667         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5668         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5669         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5670         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5671         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5672         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5673         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5674         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5675         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5676         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5677         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5678         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5679         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5680         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5681         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5682         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5683         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5684         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5685         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5686         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5687         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5688         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5689         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5690         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5691         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5692         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5693         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5694         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5695         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5696         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5697         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5698         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5699         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5700         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5701         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5702         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5703         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5704         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5705         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5706         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5707         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5708         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5709         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5710         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5711         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5712         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5713         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5714         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5715         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5716         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5717         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5718         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5719         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5720         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5721         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5722         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5723         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5724         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5725         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5726         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5727         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5728         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5729         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5730         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5731         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5732         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5733         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5734         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5735         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5736         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5737         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5738         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5739         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5740         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5741         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5742         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5743         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5744         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5745         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5746         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5747         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5748         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5749         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5750         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5751         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5752         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5753         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5754         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5755         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5756         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5757         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5758         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5759         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5760         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5761         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5762         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5763         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5764         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5765         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5766         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5767         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5768         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5769         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5770         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5771         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5772         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5773         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5774         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5775         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5776         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5777         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5778         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5779         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5780         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5781         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5782         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5783         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5784         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5785         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5786         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5787         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5788         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5789         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5790         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5791         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5792         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5793         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5794         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5795         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5796         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5797         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5798         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5799         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5800         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5801         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5802         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5803         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5804         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5805         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5806         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5807         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5808         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5809         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5810         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5811         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5812         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5813         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5814         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5815         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5816         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5817         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5818         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5819         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5820         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5821         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5822         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5823         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5824         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5825         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5826         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5827         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5828         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5829         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5830         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5831         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5832         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5833         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5834         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5835         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5836         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5837         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5838         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5839         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5840         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5841         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5842         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5843         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5844         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5845         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5846         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5847         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5848         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5849         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5850         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5851         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5852         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5853         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5854         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5855         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5856         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5857         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5858         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5859         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5860         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5861         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5862         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5863         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5864         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5865         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5866         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5867         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5868         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5869         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5870         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5871         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5872         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5873         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5874         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5875         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5876         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5877         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5878         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5879         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5880         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5881         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5882         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5883         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5884         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5885         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5886         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5887         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5888         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5889         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5890         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5891         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5892         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5893         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5894         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5895         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5896         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5897         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5898         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5899         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5900         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5901         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5902         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5903         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5904         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5905         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5906         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5907         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5908         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5909         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5910         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5911         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5912         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5913         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5914         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5915         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5916         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5917         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5918         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5919         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5920         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5921         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5922         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5923         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5924         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5925         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5926         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5927         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5928         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5929         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5930 };
5931
/* Read-only data segment of the standard TSO firmware image.  The words
 * are an ASCII string table ("MainCpuB", "MainCpuA", "stkoffldIn",
 * "stkoff**", "SwEvent0", "fatalErr"); tg3_load_tso_firmware() copies
 * it to TG3_TSO_FW_RODATA_ADDR on the chip.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5939
/* Initialized data segment of the standard TSO firmware image (holds
 * the firmware version string "stkoffld_v1.6.0"); copied to
 * TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5945
/* 5705 needs a special version of the TSO firmware.  */
/* Layout of the 5705 TSO firmware image: NIC-memory load addresses and
 * byte lengths of the .text/.rodata/.data segments, plus the sizes of
 * the .sbss/.bss areas the image occupies.  Used by
 * tg3_load_tso_firmware() to size the RX-CPU scratch region.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* NOTE(review): "RELASE" is a
                                                 * long-standing typo; name kept
                                                 * unchanged in case it is
                                                 * referenced elsewhere. */
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5961
/* Machine-code (.text) segment of the 5705-specific TSO firmware.
 * Opaque binary data — do not edit.  tg3_load_tso_firmware() loads it
 * at TG3_TSO5_FW_TEXT_ADDR and runs it on the RX CPU when the ASIC is
 * a 5705; the "+ 1" provides one spare word beyond the rounded length.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6120
/* Read-only data segment of the 5705 TSO firmware (ASCII string table:
 * "MainCpuB", "MainCpuA", "stkoffld", "fatalErr"); copied to
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6127
/* Initialized data segment of the 5705 TSO firmware (holds the version
 * string "stkoffld_v1.2.0"); copied to TG3_TSO5_FW_DATA_ADDR by
 * tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6132
6133 /* tp->lock is held. */
6134 static int tg3_load_tso_firmware(struct tg3 *tp)
6135 {
6136         struct fw_info info;
6137         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6138         int err, i;
6139
6140         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6141                 return 0;
6142
6143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6144                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6145                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6146                 info.text_data = &tg3Tso5FwText[0];
6147                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6148                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6149                 info.rodata_data = &tg3Tso5FwRodata[0];
6150                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6151                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6152                 info.data_data = &tg3Tso5FwData[0];
6153                 cpu_base = RX_CPU_BASE;
6154                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6155                 cpu_scratch_size = (info.text_len +
6156                                     info.rodata_len +
6157                                     info.data_len +
6158                                     TG3_TSO5_FW_SBSS_LEN +
6159                                     TG3_TSO5_FW_BSS_LEN);
6160         } else {
6161                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6162                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6163                 info.text_data = &tg3TsoFwText[0];
6164                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6165                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6166                 info.rodata_data = &tg3TsoFwRodata[0];
6167                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6168                 info.data_len = TG3_TSO_FW_DATA_LEN;
6169                 info.data_data = &tg3TsoFwData[0];
6170                 cpu_base = TX_CPU_BASE;
6171                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6172                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6173         }
6174
6175         err = tg3_load_firmware_cpu(tp, cpu_base,
6176                                     cpu_scratch_base, cpu_scratch_size,
6177                                     &info);
6178         if (err)
6179                 return err;
6180
6181         /* Now startup the cpu. */
6182         tw32(cpu_base + CPU_STATE, 0xffffffff);
6183         tw32_f(cpu_base + CPU_PC,    info.text_base);
6184
6185         for (i = 0; i < 5; i++) {
6186                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6187                         break;
6188                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6189                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6190                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6191                 udelay(1000);
6192         }
6193         if (i >= 5) {
6194                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6195                        "to set CPU PC, is %08x should be %08x\n",
6196                        tp->dev->name, tr32(cpu_base + CPU_PC),
6197                        info.text_base);
6198                 return -ENODEV;
6199         }
6200         tw32(cpu_base + CPU_STATE, 0xffffffff);
6201         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6202         return 0;
6203 }
6204
6205
6206 /* tp->lock is held. */
6207 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6208 {
6209         u32 addr_high, addr_low;
6210         int i;
6211
6212         addr_high = ((tp->dev->dev_addr[0] << 8) |
6213                      tp->dev->dev_addr[1]);
6214         addr_low = ((tp->dev->dev_addr[2] << 24) |
6215                     (tp->dev->dev_addr[3] << 16) |
6216                     (tp->dev->dev_addr[4] <<  8) |
6217                     (tp->dev->dev_addr[5] <<  0));
6218         for (i = 0; i < 4; i++) {
6219                 if (i == 1 && skip_mac_1)
6220                         continue;
6221                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6222                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6223         }
6224
6225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6226             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6227                 for (i = 0; i < 12; i++) {
6228                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6229                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6230                 }
6231         }
6232
6233         addr_high = (tp->dev->dev_addr[0] +
6234                      tp->dev->dev_addr[1] +
6235                      tp->dev->dev_addr[2] +
6236                      tp->dev->dev_addr[3] +
6237                      tp->dev->dev_addr[4] +
6238                      tp->dev->dev_addr[5]) &
6239                 TX_BACKOFF_SEED_MASK;
6240         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6241 }
6242
6243 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6244 {
6245         struct tg3 *tp = netdev_priv(dev);
6246         struct sockaddr *addr = p;
6247         int err = 0, skip_mac_1 = 0;
6248
6249         if (!is_valid_ether_addr(addr->sa_data))
6250                 return -EINVAL;
6251
6252         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6253
6254         if (!netif_running(dev))
6255                 return 0;
6256
6257         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6258                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6259
6260                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6261                 addr0_low = tr32(MAC_ADDR_0_LOW);
6262                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6263                 addr1_low = tr32(MAC_ADDR_1_LOW);
6264
6265                 /* Skip MAC addr 1 if ASF is using it. */
6266                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6267                     !(addr1_high == 0 && addr1_low == 0))
6268                         skip_mac_1 = 1;
6269         }
6270         spin_lock_bh(&tp->lock);
6271         __tg3_set_mac_addr(tp, skip_mac_1);
6272         spin_unlock_bh(&tp->lock);
6273
6274         return err;
6275 }
6276
/* tp->lock is held. */
/* Write a buffer-descriptor control block into NIC memory at
 * bdinfo_addr: the 64-bit host DMA address of the ring, its
 * length/flags word, and (only on pre-5705_PLUS chips) the ring's
 * NIC-local address.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	/* High then low 32 bits of the host DMA address. */
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	/* 5705 and newer chips have no NIC-address field in the bdinfo. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
6297
6298 static void __tg3_set_rx_mode(struct net_device *);
6299 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6300 {
6301         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6302         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6303         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6304         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6305         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6306                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6307                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6308         }
6309         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6310         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6311         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6312                 u32 val = ec->stats_block_coalesce_usecs;
6313
6314                 if (!netif_carrier_ok(tp->dev))
6315                         val = 0;
6316
6317                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6318         }
6319 }
6320
6321 /* tp->lock is held. */
6322 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6323 {
6324         u32 val, rdmac_mode;
6325         int i, err, limit;
6326
6327         tg3_disable_ints(tp);
6328
6329         tg3_stop_fw(tp);
6330
6331         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6332
6333         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6334                 tg3_abort_hw(tp, 1);
6335         }
6336
6337         if (reset_phy)
6338                 tg3_phy_reset(tp);
6339
6340         err = tg3_chip_reset(tp);
6341         if (err)
6342                 return err;
6343
6344         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6345
6346         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6347                 val = tr32(TG3_CPMU_CTRL);
6348                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6349                 tw32(TG3_CPMU_CTRL, val);
6350         }
6351
6352         /* This works around an issue with Athlon chipsets on
6353          * B3 tigon3 silicon.  This bit has no effect on any
6354          * other revision.  But do not set this on PCI Express
6355          * chips and don't even touch the clocks if the CPMU is present.
6356          */
6357         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6358                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6359                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6360                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6361         }
6362
6363         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6364             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6365                 val = tr32(TG3PCI_PCISTATE);
6366                 val |= PCISTATE_RETRY_SAME_DMA;
6367                 tw32(TG3PCI_PCISTATE, val);
6368         }
6369
6370         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6371                 /* Allow reads and writes to the
6372                  * APE register and memory space.
6373                  */
6374                 val = tr32(TG3PCI_PCISTATE);
6375                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6376                        PCISTATE_ALLOW_APE_SHMEM_WR;
6377                 tw32(TG3PCI_PCISTATE, val);
6378         }
6379
6380         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6381                 /* Enable some hw fixes.  */
6382                 val = tr32(TG3PCI_MSI_DATA);
6383                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6384                 tw32(TG3PCI_MSI_DATA, val);
6385         }
6386
6387         /* Descriptor ring init may make accesses to the
6388          * NIC SRAM area to setup the TX descriptors, so we
6389          * can only do this after the hardware has been
6390          * successfully reset.
6391          */
6392         err = tg3_init_rings(tp);
6393         if (err)
6394                 return err;
6395
6396         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6397             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6398                 /* This value is determined during the probe time DMA
6399                  * engine test, tg3_test_dma.
6400                  */
6401                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6402         }
6403
6404         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6405                           GRC_MODE_4X_NIC_SEND_RINGS |
6406                           GRC_MODE_NO_TX_PHDR_CSUM |
6407                           GRC_MODE_NO_RX_PHDR_CSUM);
6408         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6409
6410         /* Pseudo-header checksum is done by hardware logic and not
6411          * the offload processers, so make the chip do the pseudo-
6412          * header checksums on receive.  For transmit it is more
6413          * convenient to do the pseudo-header checksum in software
6414          * as Linux does that on transmit for us in all cases.
6415          */
6416         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6417
6418         tw32(GRC_MODE,
6419              tp->grc_mode |
6420              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6421
6422         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6423         val = tr32(GRC_MISC_CFG);
6424         val &= ~0xff;
6425         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6426         tw32(GRC_MISC_CFG, val);
6427
6428         /* Initialize MBUF/DESC pool. */
6429         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6430                 /* Do nothing.  */
6431         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6432                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6433                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6434                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6435                 else
6436                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6437                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6438                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6439         }
6440         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6441                 int fw_len;
6442
6443                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6444                           TG3_TSO5_FW_RODATA_LEN +
6445                           TG3_TSO5_FW_DATA_LEN +
6446                           TG3_TSO5_FW_SBSS_LEN +
6447                           TG3_TSO5_FW_BSS_LEN);
6448                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6449                 tw32(BUFMGR_MB_POOL_ADDR,
6450                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6451                 tw32(BUFMGR_MB_POOL_SIZE,
6452                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6453         }
6454
6455         if (tp->dev->mtu <= ETH_DATA_LEN) {
6456                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6457                      tp->bufmgr_config.mbuf_read_dma_low_water);
6458                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6459                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6460                 tw32(BUFMGR_MB_HIGH_WATER,
6461                      tp->bufmgr_config.mbuf_high_water);
6462         } else {
6463                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6464                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6465                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6466                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6467                 tw32(BUFMGR_MB_HIGH_WATER,
6468                      tp->bufmgr_config.mbuf_high_water_jumbo);
6469         }
6470         tw32(BUFMGR_DMA_LOW_WATER,
6471              tp->bufmgr_config.dma_low_water);
6472         tw32(BUFMGR_DMA_HIGH_WATER,
6473              tp->bufmgr_config.dma_high_water);
6474
6475         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6476         for (i = 0; i < 2000; i++) {
6477                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6478                         break;
6479                 udelay(10);
6480         }
6481         if (i >= 2000) {
6482                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6483                        tp->dev->name);
6484                 return -ENODEV;
6485         }
6486
6487         /* Setup replenish threshold. */
6488         val = tp->rx_pending / 8;
6489         if (val == 0)
6490                 val = 1;
6491         else if (val > tp->rx_std_max_post)
6492                 val = tp->rx_std_max_post;
6493         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6494                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6495                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6496
6497                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6498                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6499         }
6500
6501         tw32(RCVBDI_STD_THRESH, val);
6502
6503         /* Initialize TG3_BDINFO's at:
6504          *  RCVDBDI_STD_BD:     standard eth size rx ring
6505          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6506          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6507          *
6508          * like so:
6509          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6510          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6511          *                              ring attribute flags
6512          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6513          *
6514          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6515          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6516          *
6517          * The size of each ring is fixed in the firmware, but the location is
6518          * configurable.
6519          */
6520         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6521              ((u64) tp->rx_std_mapping >> 32));
6522         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6523              ((u64) tp->rx_std_mapping & 0xffffffff));
6524         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6525              NIC_SRAM_RX_BUFFER_DESC);
6526
6527         /* Don't even try to program the JUMBO/MINI buffer descriptor
6528          * configs on 5705.
6529          */
6530         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6531                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6532                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6533         } else {
6534                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6535                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6536
6537                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6538                      BDINFO_FLAGS_DISABLED);
6539
6540                 /* Setup replenish threshold. */
6541                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6542
6543                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6544                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6545                              ((u64) tp->rx_jumbo_mapping >> 32));
6546                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6547                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6548                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6549                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6550                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6551                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6552                 } else {
6553                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6554                              BDINFO_FLAGS_DISABLED);
6555                 }
6556
6557         }
6558
6559         /* There is only one send ring on 5705/5750, no need to explicitly
6560          * disable the others.
6561          */
6562         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6563                 /* Clear out send RCB ring in SRAM. */
6564                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6565                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6566                                       BDINFO_FLAGS_DISABLED);
6567         }
6568
6569         tp->tx_prod = 0;
6570         tp->tx_cons = 0;
6571         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6572         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6573
6574         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6575                        tp->tx_desc_mapping,
6576                        (TG3_TX_RING_SIZE <<
6577                         BDINFO_FLAGS_MAXLEN_SHIFT),
6578                        NIC_SRAM_TX_BUFFER_DESC);
6579
6580         /* There is only one receive return ring on 5705/5750, no need
6581          * to explicitly disable the others.
6582          */
6583         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6584                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6585                      i += TG3_BDINFO_SIZE) {
6586                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6587                                       BDINFO_FLAGS_DISABLED);
6588                 }
6589         }
6590
6591         tp->rx_rcb_ptr = 0;
6592         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6593
6594         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6595                        tp->rx_rcb_mapping,
6596                        (TG3_RX_RCB_RING_SIZE(tp) <<
6597                         BDINFO_FLAGS_MAXLEN_SHIFT),
6598                        0);
6599
6600         tp->rx_std_ptr = tp->rx_pending;
6601         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6602                      tp->rx_std_ptr);
6603
6604         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6605                                                 tp->rx_jumbo_pending : 0;
6606         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6607                      tp->rx_jumbo_ptr);
6608
6609         /* Initialize MAC address and backoff seed. */
6610         __tg3_set_mac_addr(tp, 0);
6611
6612         /* MTU + ethernet header + FCS + optional VLAN tag */
6613         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6614
6615         /* The slot time is changed by tg3_setup_phy if we
6616          * run at gigabit with half duplex.
6617          */
6618         tw32(MAC_TX_LENGTHS,
6619              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6620              (6 << TX_LENGTHS_IPG_SHIFT) |
6621              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6622
6623         /* Receive rules. */
6624         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6625         tw32(RCVLPC_CONFIG, 0x0181);
6626
6627         /* Calculate RDMAC_MODE setting early, we need it to determine
6628          * the RCVLPC_STATE_ENABLE mask.
6629          */
6630         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6631                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6632                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6633                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6634                       RDMAC_MODE_LNGREAD_ENAB);
6635
6636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6637                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6638                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6639                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6640
6641         /* If statement applies to 5705 and 5750 PCI devices only */
6642         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6643              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6644             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6645                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6646                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6647                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6648                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6649                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6650                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6651                 }
6652         }
6653
6654         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6655                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6656
6657         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6658                 rdmac_mode |= (1 << 27);
6659
6660         /* Receive/send statistics. */
6661         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6662                 val = tr32(RCVLPC_STATS_ENABLE);
6663                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6664                 tw32(RCVLPC_STATS_ENABLE, val);
6665         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6666                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6667                 val = tr32(RCVLPC_STATS_ENABLE);
6668                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6669                 tw32(RCVLPC_STATS_ENABLE, val);
6670         } else {
6671                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6672         }
6673         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6674         tw32(SNDDATAI_STATSENAB, 0xffffff);
6675         tw32(SNDDATAI_STATSCTRL,
6676              (SNDDATAI_SCTRL_ENABLE |
6677               SNDDATAI_SCTRL_FASTUPD));
6678
6679         /* Setup host coalescing engine. */
6680         tw32(HOSTCC_MODE, 0);
6681         for (i = 0; i < 2000; i++) {
6682                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6683                         break;
6684                 udelay(10);
6685         }
6686
6687         __tg3_set_coalesce(tp, &tp->coal);
6688
6689         /* set status block DMA address */
6690         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6691              ((u64) tp->status_mapping >> 32));
6692         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6693              ((u64) tp->status_mapping & 0xffffffff));
6694
6695         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6696                 /* Status/statistics block address.  See tg3_timer,
6697                  * the tg3_periodic_fetch_stats call there, and
6698                  * tg3_get_stats to see how this works for 5705/5750 chips.
6699                  */
6700                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6701                      ((u64) tp->stats_mapping >> 32));
6702                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6703                      ((u64) tp->stats_mapping & 0xffffffff));
6704                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6705                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6706         }
6707
6708         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6709
6710         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6711         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6712         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6713                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6714
6715         /* Clear statistics/status block in chip, and status block in ram. */
6716         for (i = NIC_SRAM_STATS_BLK;
6717              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6718              i += sizeof(u32)) {
6719                 tg3_write_mem(tp, i, 0);
6720                 udelay(40);
6721         }
6722         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6723
6724         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6725                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6726                 /* reset to prevent losing 1st rx packet intermittently */
6727                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6728                 udelay(10);
6729         }
6730
6731         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6732                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6733         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6734             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6735             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6736                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6737         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6738         udelay(40);
6739
6740         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6741          * If TG3_FLG2_IS_NIC is zero, we should read the
6742          * register to preserve the GPIO settings for LOMs. The GPIOs,
6743          * whether used as inputs or outputs, are set by boot code after
6744          * reset.
6745          */
6746         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6747                 u32 gpio_mask;
6748
6749                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6750                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6751                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6752
6753                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6754                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6755                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6756
6757                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6758                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6759
6760                 tp->grc_local_ctrl &= ~gpio_mask;
6761                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6762
6763                 /* GPIO1 must be driven high for eeprom write protect */
6764                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6765                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6766                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6767         }
6768         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6769         udelay(100);
6770
6771         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6772         tp->last_tag = 0;
6773
6774         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6775                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6776                 udelay(40);
6777         }
6778
6779         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6780                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6781                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6782                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6783                WDMAC_MODE_LNGREAD_ENAB);
6784
6785         /* If statement applies to 5705 and 5750 PCI devices only */
6786         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6787              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6788             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6789                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6790                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6791                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6792                         /* nothing */
6793                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6794                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6795                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6796                         val |= WDMAC_MODE_RX_ACCEL;
6797                 }
6798         }
6799
6800         /* Enable host coalescing bug fix */
6801         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6802             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6803             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6804             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6805                 val |= (1 << 29);
6806
6807         tw32_f(WDMAC_MODE, val);
6808         udelay(40);
6809
6810         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6811                 u16 pcix_cmd;
6812
6813                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6814                                      &pcix_cmd);
6815                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6816                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6817                         pcix_cmd |= PCI_X_CMD_READ_2K;
6818                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6819                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6820                         pcix_cmd |= PCI_X_CMD_READ_2K;
6821                 }
6822                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6823                                       pcix_cmd);
6824         }
6825
6826         tw32_f(RDMAC_MODE, rdmac_mode);
6827         udelay(40);
6828
6829         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6830         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6831                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6832
6833         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6834                 tw32(SNDDATAC_MODE,
6835                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6836         else
6837                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6838
6839         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6840         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6841         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6842         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6843         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6844                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6845         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6846         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6847
6848         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6849                 err = tg3_load_5701_a0_firmware_fix(tp);
6850                 if (err)
6851                         return err;
6852         }
6853
6854         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6855                 err = tg3_load_tso_firmware(tp);
6856                 if (err)
6857                         return err;
6858         }
6859
6860         tp->tx_mode = TX_MODE_ENABLE;
6861         tw32_f(MAC_TX_MODE, tp->tx_mode);
6862         udelay(100);
6863
6864         tp->rx_mode = RX_MODE_ENABLE;
6865         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6866             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6867                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6868
6869         tw32_f(MAC_RX_MODE, tp->rx_mode);
6870         udelay(10);
6871
6872         if (tp->link_config.phy_is_low_power) {
6873                 tp->link_config.phy_is_low_power = 0;
6874                 tp->link_config.speed = tp->link_config.orig_speed;
6875                 tp->link_config.duplex = tp->link_config.orig_duplex;
6876                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6877         }
6878
6879         tp->mi_mode = MAC_MI_MODE_BASE;
6880         tw32_f(MAC_MI_MODE, tp->mi_mode);
6881         udelay(80);
6882
6883         tw32(MAC_LED_CTRL, tp->led_ctrl);
6884
6885         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6886         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6887                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6888                 udelay(10);
6889         }
6890         tw32_f(MAC_RX_MODE, tp->rx_mode);
6891         udelay(10);
6892
6893         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6894                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6895                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6896                         /* Set drive transmission level to 1.2V  */
6897                         /* only if the signal pre-emphasis bit is not set  */
6898                         val = tr32(MAC_SERDES_CFG);
6899                         val &= 0xfffff000;
6900                         val |= 0x880;
6901                         tw32(MAC_SERDES_CFG, val);
6902                 }
6903                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6904                         tw32(MAC_SERDES_CFG, 0x616000);
6905         }
6906
6907         /* Prevent chip from dropping frames when flow control
6908          * is enabled.
6909          */
6910         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6911
6912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6913             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6914                 /* Use hardware link auto-negotiation */
6915                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6916         }
6917
6918         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6919             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6920                 u32 tmp;
6921
6922                 tmp = tr32(SERDES_RX_CTRL);
6923                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6924                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6925                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6926                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6927         }
6928
6929         err = tg3_setup_phy(tp, 0);
6930         if (err)
6931                 return err;
6932
6933         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6934             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6935                 u32 tmp;
6936
6937                 /* Clear CRC stats. */
6938                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6939                         tg3_writephy(tp, MII_TG3_TEST1,
6940                                      tmp | MII_TG3_TEST1_CRC_EN);
6941                         tg3_readphy(tp, 0x14, &tmp);
6942                 }
6943         }
6944
6945         __tg3_set_rx_mode(tp->dev);
6946
6947         /* Initialize receive rules. */
6948         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6949         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6950         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6951         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6952
6953         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6954             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6955                 limit = 8;
6956         else
6957                 limit = 16;
6958         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6959                 limit -= 4;
6960         switch (limit) {
6961         case 16:
6962                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6963         case 15:
6964                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6965         case 14:
6966                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6967         case 13:
6968                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6969         case 12:
6970                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6971         case 11:
6972                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6973         case 10:
6974                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6975         case 9:
6976                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6977         case 8:
6978                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6979         case 7:
6980                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6981         case 6:
6982                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6983         case 5:
6984                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6985         case 4:
6986                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6987         case 3:
6988                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6989         case 2:
6990         case 1:
6991
6992         default:
6993                 break;
6994         };
6995
6996         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6997                 /* Write our heartbeat update interval to APE. */
6998                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6999                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7000
7001         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7002
7003         return 0;
7004 }
7005
7006 /* Called at device open time to get the chip ready for
7007  * packet processing.  Invoked with tp->lock held.
7008  */
7009 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7010 {
7011         int err;
7012
7013         /* Force the chip into D0. */
7014         err = tg3_set_power_state(tp, PCI_D0);
7015         if (err)
7016                 goto out;
7017
7018         tg3_switch_clocks(tp);
7019
7020         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7021
7022         err = tg3_reset_hw(tp, reset_phy);
7023
7024 out:
7025         return err;
7026 }
7027
/* Accumulate a 32-bit hardware statistics register (read with tr32(REG))
 * into a 64-bit software counter kept as a high/low u32 pair in PSTAT.
 * After adding, if the low word is smaller than the value just added,
 * the addition wrapped, so carry into the high word.  Multi-statement
 * macro wrapped in do { } while (0) so it behaves as one statement.
 * Note: relies on a 'tp' in the caller's scope via tr32().
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7034
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called from the
 * driver timer (see tg3_timer) on 5705-plus chips, which presumably do
 * not DMA a statistics block to host memory the way older chips do --
 * see the status/statistics block setup comment in tg3_reset_hw.
 * Does nothing while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement engine counters (buffer exhaustion,
	 * discards, errors).
	 */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7075
/* Periodic maintenance timer (fires every HZ or HZ/10 jiffies, see
 * tg3_open()).  Takes only tp->lock (not the full netif lock pair):
 * it kicks the non-tagged-status interrupt workaround, polls link
 * state once per second, fetches 32-bit HW stats counters before
 * they wrap, and sends the ASF firmware heartbeat every 2 seconds.
 * Rearms itself unconditionally at the end.
 * @__opaque: struct tg3 * passed through the timer's data field.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* IRQ synchronization in progress: skip all work but keep the
	 * timer alive so it resumes once the sync is over.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine unexpectedly disabled: the chip has
		 * wedged; schedule a full reset from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but link-state changed, or carrier
			 * down while PCS/signal suggests a link: re-run
			 * PHY setup.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to force the
					 * SERDES link state machine to
					 * re-evaluate.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			/* bit 14: ring the RX CPU's doorbell so firmware
			 * notices the mailbox command.
			 */
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7195
7196 static int tg3_request_irq(struct tg3 *tp)
7197 {
7198         irq_handler_t fn;
7199         unsigned long flags;
7200         struct net_device *dev = tp->dev;
7201
7202         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7203                 fn = tg3_msi;
7204                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7205                         fn = tg3_msi_1shot;
7206                 flags = IRQF_SAMPLE_RANDOM;
7207         } else {
7208                 fn = tg3_interrupt;
7209                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7210                         fn = tg3_interrupt_tagged;
7211                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7212         }
7213         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7214 }
7215
/* Verify that the device can actually deliver an interrupt: swap in a
 * minimal test ISR, force a coalescing-now event, and poll for up to
 * ~50ms for evidence that the interrupt fired.  The normal handler is
 * reinstalled before returning.  Used by tg3_test_msi() to detect
 * broken MSI delivery on some chipsets.
 * Returns 0 if an interrupt was seen, -ENODEV if the interface is
 * down, -EIO if no interrupt arrived, or a request_irq() errno.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Replace the production handler with the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate host-coalescing event to trigger an IRQ. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI INT in
		 * MISC_HOST_CTRL means the ISR ran.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the production interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7269
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test if we're not in MSI mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed, drop the IRQ we just acquired so the
	 * caller's error path doesn't leak it.
	 */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7330
/* net_device open() method: power the chip to D0, allocate DMA rings,
 * set up MSI or INTx interrupt delivery (validating MSI with a live
 * interrupt test), initialize the hardware, start the maintenance
 * timer, and enable the TX queue.  Every failure path unwinds all
 * resources acquired up to that point.
 * Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status lets us poll less often (1Hz vs 10Hz). */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_multiplier ticks = 1 second; asf_multiplier
		 * ticks = 2 seconds (heartbeat period).
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			/* tg3_test_msi() could not restore working
			 * interrupts; tear everything down.
			 */
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		/* MSI survived the test; enable one-shot mode if the
		 * chip supports it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7470
#if 0
/* Debug-only register/ring dump, compiled out via #if 0.  Prints the
 * state of every major hardware block plus the SW status/statistics
 * blocks and a few NIC-side descriptors.  Only useful when hand-enabled
 * together with the call site in tg3_close().
 *
 * NOTE(review): the descriptor loops below assign tp->regs (an ioremap
 * cookie) to an unsigned long without a cast and pass that to readl();
 * this likely predates __iomem annotations and would warn/fail sparse
 * if re-enabled — verify before resurrecting this code.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7698
7699 static struct net_device_stats *tg3_get_stats(struct net_device *);
7700 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7701
/* net_device stop() method: quiesce NAPI and pending reset work, stop
 * the TX queue and maintenance timer, halt the chip, release the IRQ
 * (and MSI vector), snapshot the hardware counters into the *_prev
 * accumulators before the DMA-coherent stats block is freed, then drop
 * the device to D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure no reset_task runs concurrently with teardown. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Fold the final counter values into the *_prev snapshots so
	 * statistics survive across close/open cycles.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7745
7746 static inline unsigned long get_stat64(tg3_stat64_t *val)
7747 {
7748         unsigned long ret;
7749
7750 #if (BITS_PER_LONG == 32)
7751         ret = val->low;
7752 #else
7753         ret = ((u64)val->high << 32) | ((u64)val->low);
7754 #endif
7755         return ret;
7756 }
7757
/* Return the cumulative RX CRC error count.  On copper 5700/5701 the
 * MAC's rx_fcs_errors counter is not used; instead the PHY's CRC error
 * counter is read (and accumulated in tp->phy_crc_errors, since the
 * hardware counter clears on read).  All other chips report the MAC
 * statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it.
			 * NOTE(review): register 0x14 is presumably the
			 * PHY CRC error counter on these parts — confirm
			 * against the Broadcom PHY register map.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7783
/* Accumulate one ethtool statistic: the value saved at the last close
 * (old_estats) plus the live hardware counter.  Relies on estats,
 * old_estats and hw_stats being in scope with identical member names.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block from the hardware counters,
 * folding in the totals preserved across close/open in estats_prev.
 * If the DMA stats block is gone (device closed), return the last
 * snapshot unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7875
7876 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7877 {
7878         struct tg3 *tp = netdev_priv(dev);
7879         struct net_device_stats *stats = &tp->net_stats;
7880         struct net_device_stats *old_stats = &tp->net_stats_prev;
7881         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7882
7883         if (!hw_stats)
7884                 return old_stats;
7885
7886         stats->rx_packets = old_stats->rx_packets +
7887                 get_stat64(&hw_stats->rx_ucast_packets) +
7888                 get_stat64(&hw_stats->rx_mcast_packets) +
7889                 get_stat64(&hw_stats->rx_bcast_packets);
7890
7891         stats->tx_packets = old_stats->tx_packets +
7892                 get_stat64(&hw_stats->tx_ucast_packets) +
7893                 get_stat64(&hw_stats->tx_mcast_packets) +
7894                 get_stat64(&hw_stats->tx_bcast_packets);
7895
7896         stats->rx_bytes = old_stats->rx_bytes +
7897                 get_stat64(&hw_stats->rx_octets);
7898         stats->tx_bytes = old_stats->tx_bytes +
7899                 get_stat64(&hw_stats->tx_octets);
7900
7901         stats->rx_errors = old_stats->rx_errors +
7902                 get_stat64(&hw_stats->rx_errors);
7903         stats->tx_errors = old_stats->tx_errors +
7904                 get_stat64(&hw_stats->tx_errors) +
7905                 get_stat64(&hw_stats->tx_mac_errors) +
7906                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7907                 get_stat64(&hw_stats->tx_discards);
7908
7909         stats->multicast = old_stats->multicast +
7910                 get_stat64(&hw_stats->rx_mcast_packets);
7911         stats->collisions = old_stats->collisions +
7912                 get_stat64(&hw_stats->tx_collisions);
7913
7914         stats->rx_length_errors = old_stats->rx_length_errors +
7915                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7916                 get_stat64(&hw_stats->rx_undersize_packets);
7917
7918         stats->rx_over_errors = old_stats->rx_over_errors +
7919                 get_stat64(&hw_stats->rxbds_empty);
7920         stats->rx_frame_errors = old_stats->rx_frame_errors +
7921                 get_stat64(&hw_stats->rx_align_errors);
7922         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7923                 get_stat64(&hw_stats->tx_discards);
7924         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7925                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7926
7927         stats->rx_crc_errors = old_stats->rx_crc_errors +
7928                 calc_crc_errors(tp);
7929
7930         stats->rx_missed_errors = old_stats->rx_missed_errors +
7931                 get_stat64(&hw_stats->rx_discards);
7932
7933         return stats;
7934 }
7935
/* Bitwise CRC-32 (reflected polynomial 0xedb88320, init 0xffffffff,
 * final inversion) over the first @len bytes of @buf.  Used below to
 * build the multicast hash filter.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
	}

	return ~crc;
}
7960
7961 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7962 {
7963         /* accept or reject all multicast frames */
7964         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7965         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7966         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7967         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7968 }
7969
/* Recompute and program the chip RX filtering state from dev->flags
 * and the device multicast list.  Caller must hold the full tg3 lock
 * (see tg3_set_rx_mode() below).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s): build a 128-bit hash
		 * filter from the CRC of each address.  The inverted low
		 * 7 CRC bits select one of 128 filter bits; the top two
		 * of those pick the register, the low five the bit.
		 */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when the value actually changed;
	 * tw32_f + udelay lets the write settle.
	 */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8033
/* netdev set_rx_mode hook: locked wrapper around __tg3_set_rx_mode().
 * Does nothing while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8045
8046 #define TG3_REGDUMP_LEN         (32 * 1024)
8047
8048 static int tg3_get_regs_len(struct net_device *dev)
8049 {
8050         return TG3_REGDUMP_LEN;
8051 }
8052
/* ethtool get_regs hook: copy selected chip register ranges into the
 * caller-supplied TG3_REGDUMP_LEN buffer.  The dump is sparse — each
 * block lands at its own register offset within the buffer (unread
 * gaps stay zero from the memset) — so the layout of the dump mirrors
 * the chip's register map.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Skip register reads entirely when the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the buffer at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at (base) within the buffer, then read a
 * (len)-byte register range starting at (base).
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Same, for a single register. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist when the NVRAM interface is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8125
8126 static int tg3_get_eeprom_len(struct net_device *dev)
8127 {
8128         struct tg3 *tp = netdev_priv(dev);
8129
8130         return tp->nvram_size;
8131 }
8132
8133 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8134 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8135
/* ethtool get_eeprom hook: copy an arbitrary byte range of the NVRAM
 * into @data.  NVRAM is read in aligned 4-byte words, so the request
 * is handled in up to three phases: a leading partial word, the
 * aligned middle, and a trailing partial word.  eeprom->len is
 * updated as bytes are delivered so a partial count survives an
 * early error return.  Returns 0 or a tg3_nvram_read() error code;
 * -EAGAIN while the PHY is in low-power state.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are byte-swapped to little-endian before
		 * being carved up into bytes.
		 */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8197
8198 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8199
/* ethtool set_eeprom hook: write an arbitrary byte range of the NVRAM
 * from @data.  Because NVRAM writes are done in aligned 4-byte words,
 * an unaligned head or tail is handled by reading the bordering words
 * first and splicing the user bytes into a temporary buffer before
 * the single block write.  Returns 0, an NVRAM read/write error code,
 * -EINVAL on a bad magic, -ENOMEM if the bounce buffer allocation
 * fails, or -EAGAIN while the PHY is in low-power state.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		/* Even a sub-word request must cover a full word. */
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Bounce buffer: preserved head word, user bytes in the
		 * middle, preserved tail word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8258
8259 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8260 {
8261         struct tg3 *tp = netdev_priv(dev);
8262
8263         cmd->supported = (SUPPORTED_Autoneg);
8264
8265         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8266                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8267                                    SUPPORTED_1000baseT_Full);
8268
8269         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8270                 cmd->supported |= (SUPPORTED_100baseT_Half |
8271                                   SUPPORTED_100baseT_Full |
8272                                   SUPPORTED_10baseT_Half |
8273                                   SUPPORTED_10baseT_Full |
8274                                   SUPPORTED_MII);
8275                 cmd->port = PORT_TP;
8276         } else {
8277                 cmd->supported |= SUPPORTED_FIBRE;
8278                 cmd->port = PORT_FIBRE;
8279         }
8280
8281         cmd->advertising = tp->link_config.advertising;
8282         if (netif_running(dev)) {
8283                 cmd->speed = tp->link_config.active_speed;
8284                 cmd->duplex = tp->link_config.active_duplex;
8285         }
8286         cmd->phy_address = PHY_ADDR;
8287         cmd->transceiver = 0;
8288         cmd->autoneg = tp->link_config.autoneg;
8289         cmd->maxtxpkt = 0;
8290         cmd->maxrxpkt = 0;
8291         return 0;
8292 }
8293
8294 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8295 {
8296         struct tg3 *tp = netdev_priv(dev);
8297
8298         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8299                 /* These are the only valid advertisement bits allowed.  */
8300                 if (cmd->autoneg == AUTONEG_ENABLE &&
8301                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8302                                           ADVERTISED_1000baseT_Full |
8303                                           ADVERTISED_Autoneg |
8304                                           ADVERTISED_FIBRE)))
8305                         return -EINVAL;
8306                 /* Fiber can only do SPEED_1000.  */
8307                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8308                          (cmd->speed != SPEED_1000))
8309                         return -EINVAL;
8310         /* Copper cannot force SPEED_1000.  */
8311         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8312                    (cmd->speed == SPEED_1000))
8313                 return -EINVAL;
8314         else if ((cmd->speed == SPEED_1000) &&
8315                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8316                 return -EINVAL;
8317
8318         tg3_full_lock(tp, 0);
8319
8320         tp->link_config.autoneg = cmd->autoneg;
8321         if (cmd->autoneg == AUTONEG_ENABLE) {
8322                 tp->link_config.advertising = (cmd->advertising |
8323                                               ADVERTISED_Autoneg);
8324                 tp->link_config.speed = SPEED_INVALID;
8325                 tp->link_config.duplex = DUPLEX_INVALID;
8326         } else {
8327                 tp->link_config.advertising = 0;
8328                 tp->link_config.speed = cmd->speed;
8329                 tp->link_config.duplex = cmd->duplex;
8330         }
8331
8332         tp->link_config.orig_speed = tp->link_config.speed;
8333         tp->link_config.orig_duplex = tp->link_config.duplex;
8334         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8335
8336         if (netif_running(dev))
8337                 tg3_setup_phy(tp, 1);
8338
8339         tg3_full_unlock(tp);
8340
8341         return 0;
8342 }
8343
8344 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8345 {
8346         struct tg3 *tp = netdev_priv(dev);
8347
8348         strcpy(info->driver, DRV_MODULE_NAME);
8349         strcpy(info->version, DRV_MODULE_VERSION);
8350         strcpy(info->fw_version, tp->fw_ver);
8351         strcpy(info->bus_info, pci_name(tp->pdev));
8352 }
8353
8354 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8355 {
8356         struct tg3 *tp = netdev_priv(dev);
8357
8358         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8359                 wol->supported = WAKE_MAGIC;
8360         else
8361                 wol->supported = 0;
8362         wol->wolopts = 0;
8363         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8364                 wol->wolopts = WAKE_MAGIC;
8365         memset(&wol->sopass, 0, sizeof(wol->sopass));
8366 }
8367
8368 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8369 {
8370         struct tg3 *tp = netdev_priv(dev);
8371
8372         if (wol->wolopts & ~WAKE_MAGIC)
8373                 return -EINVAL;
8374         if ((wol->wolopts & WAKE_MAGIC) &&
8375             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8376                 return -EINVAL;
8377
8378         spin_lock_bh(&tp->lock);
8379         if (wol->wolopts & WAKE_MAGIC)
8380                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8381         else
8382                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8383         spin_unlock_bh(&tp->lock);
8384
8385         return 0;
8386 }
8387
8388 static u32 tg3_get_msglevel(struct net_device *dev)
8389 {
8390         struct tg3 *tp = netdev_priv(dev);
8391         return tp->msg_enable;
8392 }
8393
8394 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8395 {
8396         struct tg3 *tp = netdev_priv(dev);
8397         tp->msg_enable = value;
8398 }
8399
8400 static int tg3_set_tso(struct net_device *dev, u32 value)
8401 {
8402         struct tg3 *tp = netdev_priv(dev);
8403
8404         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8405                 if (value)
8406                         return -EINVAL;
8407                 return 0;
8408         }
8409         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8410             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8411                 if (value) {
8412                         dev->features |= NETIF_F_TSO6;
8413                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8414                                 dev->features |= NETIF_F_TSO_ECN;
8415                 } else
8416                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8417         }
8418         return ethtool_op_set_tso(dev, value);
8419 }
8420
/* ethtool nway_reset hook: restart link autonegotiation.
 *
 * Returns 0 on success; -EAGAIN if the interface is down; -EINVAL if
 * the device uses a SERDES PHY, or if autoneg is not enabled in BMCR
 * and parallel detection is not in use.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result
	 * discarded — presumably to flush a stale value from the PHY;
	 * confirm before collapsing to a single read.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8447
8448 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8449 {
8450         struct tg3 *tp = netdev_priv(dev);
8451
8452         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8453         ering->rx_mini_max_pending = 0;
8454         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8455                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8456         else
8457                 ering->rx_jumbo_max_pending = 0;
8458
8459         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8460
8461         ering->rx_pending = tp->rx_pending;
8462         ering->rx_mini_pending = 0;
8463         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8464                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8465         else
8466                 ering->rx_jumbo_pending = 0;
8467
8468         ering->tx_pending = tp->tx_pending;
8469 }
8470
/* ethtool set_ringparam hook: resize the RX/jumbo/TX rings.  Requires
 * tx_pending to exceed MAX_SKB_FRAGS (three times that on TSO-bug
 * chips) so a maximally fragmented skb always fits.  A running
 * interface is stopped, the hardware halted and restarted with the
 * new sizes.  Returns 0, -EINVAL on out-of-range sizes, or a
 * tg3_restart_hw() error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the device (and sync irqs) before touching ring sizes. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8510
8511 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8512 {
8513         struct tg3 *tp = netdev_priv(dev);
8514
8515         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8516         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8517         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8518 }
8519
8520 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8521 {
8522         struct tg3 *tp = netdev_priv(dev);
8523         int irq_sync = 0, err = 0;
8524
8525         if (netif_running(dev)) {
8526                 tg3_netif_stop(tp);
8527                 irq_sync = 1;
8528         }
8529
8530         tg3_full_lock(tp, irq_sync);
8531
8532         if (epause->autoneg)
8533                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8534         else
8535                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8536         if (epause->rx_pause)
8537                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8538         else
8539                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8540         if (epause->tx_pause)
8541                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8542         else
8543                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8544
8545         if (netif_running(dev)) {
8546                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8547                 err = tg3_restart_hw(tp, 1);
8548                 if (!err)
8549                         tg3_netif_start(tp);
8550         }
8551
8552         tg3_full_unlock(tp);
8553
8554         return err;
8555 }
8556
8557 static u32 tg3_get_rx_csum(struct net_device *dev)
8558 {
8559         struct tg3 *tp = netdev_priv(dev);
8560         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8561 }
8562
8563 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8564 {
8565         struct tg3 *tp = netdev_priv(dev);
8566
8567         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8568                 if (data != 0)
8569                         return -EINVAL;
8570                 return 0;
8571         }
8572
8573         spin_lock_bh(&tp->lock);
8574         if (data)
8575                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8576         else
8577                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8578         spin_unlock_bh(&tp->lock);
8579
8580         return 0;
8581 }
8582
8583 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8584 {
8585         struct tg3 *tp = netdev_priv(dev);
8586
8587         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8588                 if (data != 0)
8589                         return -EINVAL;
8590                 return 0;
8591         }
8592
8593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8595             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8596             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8597                 ethtool_op_set_tx_ipv6_csum(dev, data);
8598         else
8599                 ethtool_op_set_tx_csum(dev, data);
8600
8601         return 0;
8602 }
8603
8604 static int tg3_get_sset_count (struct net_device *dev, int sset)
8605 {
8606         switch (sset) {
8607         case ETH_SS_TEST:
8608                 return TG3_NUM_TEST;
8609         case ETH_SS_STATS:
8610                 return TG3_NUM_STATS;
8611         default:
8612                 return -EOPNOTSUPP;
8613         }
8614 }
8615
8616 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8617 {
8618         switch (stringset) {
8619         case ETH_SS_STATS:
8620                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8621                 break;
8622         case ETH_SS_TEST:
8623                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8624                 break;
8625         default:
8626                 WARN_ON(1);     /* we need a WARN() */
8627                 break;
8628         }
8629 }
8630
8631 static int tg3_phys_id(struct net_device *dev, u32 data)
8632 {
8633         struct tg3 *tp = netdev_priv(dev);
8634         int i;
8635
8636         if (!netif_running(tp->dev))
8637                 return -EAGAIN;
8638
8639         if (data == 0)
8640                 data = 2;
8641
8642         for (i = 0; i < (data * 2); i++) {
8643                 if ((i % 2) == 0)
8644                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8645                                            LED_CTRL_1000MBPS_ON |
8646                                            LED_CTRL_100MBPS_ON |
8647                                            LED_CTRL_10MBPS_ON |
8648                                            LED_CTRL_TRAFFIC_OVERRIDE |
8649                                            LED_CTRL_TRAFFIC_BLINK |
8650                                            LED_CTRL_TRAFFIC_LED);
8651
8652                 else
8653                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8654                                            LED_CTRL_TRAFFIC_OVERRIDE);
8655
8656                 if (msleep_interruptible(500))
8657                         break;
8658         }
8659         tw32(MAC_LED_CTRL, tp->led_ctrl);
8660         return 0;
8661 }
8662
8663 static void tg3_get_ethtool_stats (struct net_device *dev,
8664                                    struct ethtool_stats *estats, u64 *tmp_stats)
8665 {
8666         struct tg3 *tp = netdev_priv(dev);
8667         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8668 }
8669
8670 #define NVRAM_TEST_SIZE 0x100
8671 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8672 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8673 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8674
/* "nvram test" entry of the ethtool self-test.
 *
 * Reads the NVRAM image into a scratch buffer and verifies the
 * checksum scheme matching the format identified by the magic word at
 * offset 0:
 *   - TG3_EEPROM_MAGIC:    regular image; CRC at 0x10 covers the first
 *                          0x10 bytes, CRC at 0xfc covers the 0x88-byte
 *                          manufacturing block starting at 0x74.
 *   - TG3_EEPROM_MAGIC_FW: selfboot format 1; all bytes must sum to
 *                          zero mod 256.
 *   - TG3_EEPROM_MAGIC_HW: selfboot HW format; per-byte parity bits.
 *
 * Returns 0 when the image checks out, -ENOMEM if the scratch buffer
 * cannot be allocated, -EIO on a read failure or checksum mismatch.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick the number of bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unrecognized selfboot revision: skip test */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		/* Buffer is kept in little-endian on-NVRAM byte order. */
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;	/* read failed; err holds tg3_nvram_read()'s code */

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* 8-bit checksum: every byte of the image summed mod 256
		 * must come out to zero.
		 */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Bytes 0
		 * and 8 each pack 7 parity bits; byte 16 packs 6 and the
		 * byte after it packs 8 more.  Every other byte is data.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Pass condition: a data byte of odd weight must have its
		 * parity bit clear, one of even weight must have it set
		 * (i.e. data plus parity always has odd total weight).
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8787
8788 #define TG3_SERDES_TIMEOUT_SEC  2
8789 #define TG3_COPPER_TIMEOUT_SEC  6
8790
8791 static int tg3_test_link(struct tg3 *tp)
8792 {
8793         int i, max;
8794
8795         if (!netif_running(tp->dev))
8796                 return -ENODEV;
8797
8798         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8799                 max = TG3_SERDES_TIMEOUT_SEC;
8800         else
8801                 max = TG3_COPPER_TIMEOUT_SEC;
8802
8803         for (i = 0; i < max; i++) {
8804                 if (netif_carrier_ok(tp->dev))
8805                         return 0;
8806
8807                 if (msleep_interruptible(1000))
8808                         break;
8809         }
8810
8811         return -EIO;
8812 }
8813
8814 /* Only test the commonly used registers */
/* "register test (offline)" entry of the ethtool self-test.
 *
 * Walks a table of commonly used registers and, for each entry that
 * applies to the current chip family, verifies that the read-only
 * bits (read_mask) keep their value and the read/write bits
 * (write_mask) can be both cleared and set.  The original register
 * contents are restored in every exit path.
 *
 * Returns 0 on success, -EIO (after restoring the failing register)
 * on the first mismatch.
 */
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
		/* Flag bits restrict an entry to particular chip families. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	/* Restore the register we were probing before reporting failure. */
	tw32(offset, save_val);
	return -EIO;
}
9034
9035 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9036 {
9037         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9038         int i;
9039         u32 j;
9040
9041         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9042                 for (j = 0; j < len; j += 4) {
9043                         u32 val;
9044
9045                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9046                         tg3_read_mem(tp, offset + j, &val);
9047                         if (val != test_pattern[i])
9048                                 return -EIO;
9049                 }
9050         }
9051         return 0;
9052 }
9053
9054 static int tg3_test_memory(struct tg3 *tp)
9055 {
9056         static struct mem_entry {
9057                 u32 offset;
9058                 u32 len;
9059         } mem_tbl_570x[] = {
9060                 { 0x00000000, 0x00b50},
9061                 { 0x00002000, 0x1c000},
9062                 { 0xffffffff, 0x00000}
9063         }, mem_tbl_5705[] = {
9064                 { 0x00000100, 0x0000c},
9065                 { 0x00000200, 0x00008},
9066                 { 0x00004000, 0x00800},
9067                 { 0x00006000, 0x01000},
9068                 { 0x00008000, 0x02000},
9069                 { 0x00010000, 0x0e000},
9070                 { 0xffffffff, 0x00000}
9071         }, mem_tbl_5755[] = {
9072                 { 0x00000200, 0x00008},
9073                 { 0x00004000, 0x00800},
9074                 { 0x00006000, 0x00800},
9075                 { 0x00008000, 0x02000},
9076                 { 0x00010000, 0x0c000},
9077                 { 0xffffffff, 0x00000}
9078         }, mem_tbl_5906[] = {
9079                 { 0x00000200, 0x00008},
9080                 { 0x00004000, 0x00400},
9081                 { 0x00006000, 0x00400},
9082                 { 0x00008000, 0x01000},
9083                 { 0x00010000, 0x01000},
9084                 { 0xffffffff, 0x00000}
9085         };
9086         struct mem_entry *mem_tbl;
9087         int err = 0;
9088         int i;
9089
9090         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9091                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9092                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9093                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9094                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9095                         mem_tbl = mem_tbl_5755;
9096                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9097                         mem_tbl = mem_tbl_5906;
9098                 else
9099                         mem_tbl = mem_tbl_5705;
9100         } else
9101                 mem_tbl = mem_tbl_570x;
9102
9103         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9104                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9105                     mem_tbl[i].len)) != 0)
9106                         break;
9107         }
9108
9109         return err;
9110 }
9111
9112 #define TG3_MAC_LOOPBACK        0
9113 #define TG3_PHY_LOOPBACK        1
9114
/* Send one self-addressed 1514-byte frame through the selected
 * loopback path and verify it arrives intact on the standard RX ring.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK (internal MAC loopback) or
 *                 TG3_PHY_LOOPBACK (BMCR_LOOPBACK at the PHY).
 *
 * Returns 0 when the looped-back frame is received with a matching
 * payload, -ENOMEM if the test skb cannot be allocated, -EINVAL for
 * an unknown loopback_mode, -EIO on any transmit/receive/compare
 * failure.  The received skb is left on the ring; tg3_free_rings()
 * unmaps and frees it later.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: clear a shadow-register bit (0x1b bit 5)
			 * via the EPHY test register before looping back.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY itself in loopback mode. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size test frame: dest addr = our own MAC, a zeroed
	 * src/type area, then an incrementing byte pattern as payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer stands before we transmit. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the TX mailbox and read it back to post the new producer. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been consumed on TX and produced on RX. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: standard ring, no errors (an odd
	 * nibble indication alone is tolerated), expected length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the payload pattern byte for byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9282
9283 #define TG3_MAC_LOOPBACK_FAILED         1
9284 #define TG3_PHY_LOOPBACK_FAILED         2
9285 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9286                                          TG3_PHY_LOOPBACK_FAILED)
9287
/* "loopback test (offline)" entry of the ethtool self-test.
 *
 * Resets the hardware, then runs the MAC loopback test and, on
 * non-serdes (copper) devices, the PHY loopback test.  On chips with
 * a CPMU, the CPMU hardware mutex is acquired and link-speed power
 * management is disabled around the MAC loopback run, then restored.
 *
 * Returns a bitmask of TG3_MAC_LOOPBACK_FAILED/TG3_PHY_LOOPBACK_FAILED,
 * 0 if all applicable tests passed, or TG3_LOOPBACK_FAILED outright if
 * the device is down, the reset fails, or the CPMU mutex cannot be
 * acquired.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore the saved CPMU control word. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only makes sense when there is a copper PHY. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9341
/* ethtool .self_test callback.
 *
 * Fills @data (TG3_NUM_TEST entries) with per-test pass(0)/fail(1)
 * results: [0] nvram, [1] link, [2] registers, [3] memory,
 * [4] loopback (bitmask of loopback failures), [5] interrupt, and
 * sets ETH_TEST_FL_FAILED in @etest on any failure.  The nvram and
 * link tests are online; the rest run only with ETH_TEST_FL_OFFLINE,
 * bracketed by a halt/restart of the chip under the full lock.
 * A device found in low-power state is powered up for the duration
 * and returned to D3hot afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce NAPI/TX before taking the full lock. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its internal CPUs so the offline
		 * tests can poke registers and memory freely.  The NVRAM
		 * lock is held across the CPU halts and released only if
		 * it was successfully acquired.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip back to its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9414
9415 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9416 {
9417         struct mii_ioctl_data *data = if_mii(ifr);
9418         struct tg3 *tp = netdev_priv(dev);
9419         int err;
9420
9421         switch(cmd) {
9422         case SIOCGMIIPHY:
9423                 data->phy_id = PHY_ADDR;
9424
9425                 /* fallthru */
9426         case SIOCGMIIREG: {
9427                 u32 mii_regval;
9428
9429                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9430                         break;                  /* We have no PHY */
9431
9432                 if (tp->link_config.phy_is_low_power)
9433                         return -EAGAIN;
9434
9435                 spin_lock_bh(&tp->lock);
9436                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9437                 spin_unlock_bh(&tp->lock);
9438
9439                 data->val_out = mii_regval;
9440
9441                 return err;
9442         }
9443
9444         case SIOCSMIIREG:
9445                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9446                         break;                  /* We have no PHY */
9447
9448                 if (!capable(CAP_NET_ADMIN))
9449                         return -EPERM;
9450
9451                 if (tp->link_config.phy_is_low_power)
9452                         return -EAGAIN;
9453
9454                 spin_lock_bh(&tp->lock);
9455                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9456                 spin_unlock_bh(&tp->lock);
9457
9458                 return err;
9459
9460         default:
9461                 /* do nothing */
9462                 break;
9463         }
9464         return -EOPNOTSUPP;
9465 }
9466
9467 #if TG3_VLAN_TAG_USED
9468 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9469 {
9470         struct tg3 *tp = netdev_priv(dev);
9471
9472         if (netif_running(dev))
9473                 tg3_netif_stop(tp);
9474
9475         tg3_full_lock(tp, 0);
9476
9477         tp->vlgrp = grp;
9478
9479         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9480         __tg3_set_rx_mode(dev);
9481
9482         if (netif_running(dev))
9483                 tg3_netif_start(tp);
9484
9485         tg3_full_unlock(tp);
9486 }
9487 #endif
9488
9489 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9490 {
9491         struct tg3 *tp = netdev_priv(dev);
9492
9493         memcpy(ec, &tp->coal, sizeof(*ec));
9494         return 0;
9495 }
9496
9497 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9498 {
9499         struct tg3 *tp = netdev_priv(dev);
9500         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9501         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9502
9503         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9504                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9505                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9506                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9507                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9508         }
9509
9510         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9511             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9512             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9513             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9514             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9515             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9516             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9517             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9518             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9519             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9520                 return -EINVAL;
9521
9522         /* No rx interrupts will be generated if both are zero */
9523         if ((ec->rx_coalesce_usecs == 0) &&
9524             (ec->rx_max_coalesced_frames == 0))
9525                 return -EINVAL;
9526
9527         /* No tx interrupts will be generated if both are zero */
9528         if ((ec->tx_coalesce_usecs == 0) &&
9529             (ec->tx_max_coalesced_frames == 0))
9530                 return -EINVAL;
9531
9532         /* Only copy relevant parameters, ignore all others. */
9533         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9534         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9535         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9536         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9537         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9538         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9539         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9540         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9541         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9542
9543         if (netif_running(dev)) {
9544                 tg3_full_lock(tp, 0);
9545                 __tg3_set_coalesce(tp, &tp->coal);
9546                 tg3_full_unlock(tp);
9547         }
9548         return 0;
9549 }
9550
/* ethtool method table for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9583
9584 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9585 {
9586         u32 cursize, val, magic;
9587
9588         tp->nvram_size = EEPROM_CHIP_SIZE;
9589
9590         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9591                 return;
9592
9593         if ((magic != TG3_EEPROM_MAGIC) &&
9594             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9595             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9596                 return;
9597
9598         /*
9599          * Size the chip by reading offsets at increasing powers of two.
9600          * When we encounter our validation signature, we know the addressing
9601          * has wrapped around, and thus have our chip size.
9602          */
9603         cursize = 0x10;
9604
9605         while (cursize < tp->nvram_size) {
9606                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9607                         return;
9608
9609                 if (val == magic)
9610                         break;
9611
9612                 cursize <<= 1;
9613         }
9614
9615         tp->nvram_size = cursize;
9616 }
9617
9618 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9619 {
9620         u32 val;
9621
9622         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9623                 return;
9624
9625         /* Selfboot format */
9626         if (val != TG3_EEPROM_MAGIC) {
9627                 tg3_get_eeprom_size(tp);
9628                 return;
9629         }
9630
9631         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9632                 if (val != 0) {
9633                         tp->nvram_size = (val >> 16) * 1024;
9634                         return;
9635                 }
9636         }
9637         tp->nvram_size = 0x80000;
9638 }
9639
/* Decode NVRAM_CFG1 for pre-5752 devices and record the flash vendor,
 * page size and buffering mode in tp.  The vendor field is only
 * consulted on 5750 or 5780-class chips; anything else defaults to a
 * buffered Atmel AT45DB0X1B-style part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface: clear the compat-bypass bit and
                 * write the configuration back.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Older chips: assume buffered Atmel AT45DB0X1B. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
9692
/* Decode NVRAM_CFG1 for the 5752 and record vendor, buffering mode and
 * page size.  Bit 27 of NVRAM_CFG1 marks the NVRAM as reserved for TPM
 * use.  Flash parts take their page size from the 5752PAGE_SIZE field;
 * EEPROM parts use the whole-chip size as the "page" and get the
 * compat-bypass bit cleared.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
9753
9754 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9755 {
9756         u32 nvcfg1, protect = 0;
9757
9758         nvcfg1 = tr32(NVRAM_CFG1);
9759
9760         /* NVRAM protection for TPM */
9761         if (nvcfg1 & (1 << 27)) {
9762                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9763                 protect = 1;
9764         }
9765
9766         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9767         switch (nvcfg1) {
9768                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9769                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9770                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9771                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9772                         tp->nvram_jedecnum = JEDEC_ATMEL;
9773                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9774                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9775                         tp->nvram_pagesize = 264;
9776                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9777                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9778                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9779                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9780                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9781                         else
9782                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9783                         break;
9784                 case FLASH_5752VENDOR_ST_M45PE10:
9785                 case FLASH_5752VENDOR_ST_M45PE20:
9786                 case FLASH_5752VENDOR_ST_M45PE40:
9787                         tp->nvram_jedecnum = JEDEC_ST;
9788                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9789                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9790                         tp->nvram_pagesize = 256;
9791                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9792                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9793                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9794                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9795                         else
9796                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9797                         break;
9798         }
9799 }
9800
/* Decode NVRAM_CFG1 for 5787-family parts (also used for the 5784; see
 * tg3_nvram_init()).  EEPROM vendors get the compat-bypass bit cleared
 * and a whole-chip page size; Atmel flash uses 264-byte pages and ST
 * flash 256-byte pages.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM access: clear compat-bypass and write back. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
9838
/* Decode NVRAM_CFG1 for the 5761.  Bit 27 marks TPM-protected NVRAM;
 * in that case the usable size is read back from the NVRAM_ADDR_LOCKOUT
 * register, otherwise it is derived from the detected part number.
 * 5761 Atmel parts skip the page/physical address translation
 * (TG3_FLG3_NO_NVRAM_ADDR_TRANS; see tg3_nvram_phys_addr()).
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                /* TPM-protected: usable size is set by the lockout register. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                /* Unprotected: size follows from the part's capacity. */
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = 0x100000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = 0x80000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = 0x40000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = 0x20000;
                                break;
                }
        }
}
9913
/* 5906: the NVRAM is a buffered Atmel part accessed EEPROM-style, so
 * the "page" size is the whole AT24C512 chip.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
9920
9921 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9922 static void __devinit tg3_nvram_init(struct tg3 *tp)
9923 {
9924         tw32_f(GRC_EEPROM_ADDR,
9925              (EEPROM_ADDR_FSM_RESET |
9926               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9927                EEPROM_ADDR_CLKPERD_SHIFT)));
9928
9929         msleep(1);
9930
9931         /* Enable seeprom accesses. */
9932         tw32_f(GRC_LOCAL_CTRL,
9933              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9934         udelay(100);
9935
9936         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9937             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9938                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9939
9940                 if (tg3_nvram_lock(tp)) {
9941                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9942                                "tg3_nvram_init failed.\n", tp->dev->name);
9943                         return;
9944                 }
9945                 tg3_enable_nvram_access(tp);
9946
9947                 tp->nvram_size = 0;
9948
9949                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9950                         tg3_get_5752_nvram_info(tp);
9951                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9952                         tg3_get_5755_nvram_info(tp);
9953                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9954                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9955                         tg3_get_5787_nvram_info(tp);
9956                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9957                         tg3_get_5761_nvram_info(tp);
9958                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9959                         tg3_get_5906_nvram_info(tp);
9960                 else
9961                         tg3_get_nvram_info(tp);
9962
9963                 if (tp->nvram_size == 0)
9964                         tg3_get_nvram_size(tp);
9965
9966                 tg3_disable_nvram_access(tp);
9967                 tg3_nvram_unlock(tp);
9968
9969         } else {
9970                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9971
9972                 tg3_get_eeprom_size(tp);
9973         }
9974 }
9975
/* Read one 32-bit word from the serial EEPROM through the GRC EEPROM
 * state machine.  @offset must be dword-aligned and within
 * EEPROM_ADDR_ADDR_MASK.  Polls for completion for up to ~1 second.
 * Returns 0 with *val filled in, -EINVAL on a bad offset, or -EBUSY
 * if the EEPROM never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Preserve device-id bits, clear address/READ, then kick off
         * a read at the requested address.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10009
10010 #define NVRAM_CMD_TIMEOUT 10000
10011
10012 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10013 {
10014         int i;
10015
10016         tw32(NVRAM_CMD, nvram_cmd);
10017         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10018                 udelay(10);
10019                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10020                         udelay(10);
10021                         break;
10022                 }
10023         }
10024         if (i == NVRAM_CMD_TIMEOUT) {
10025                 return -EBUSY;
10026         }
10027         return 0;
10028 }
10029
10030 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10031 {
10032         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10033             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10034             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10035            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10036             (tp->nvram_jedecnum == JEDEC_ATMEL))
10037
10038                 addr = ((addr / tp->nvram_pagesize) <<
10039                         ATMEL_AT45DB0X1B_PAGE_POS) +
10040                        (addr % tp->nvram_pagesize);
10041
10042         return addr;
10043 }
10044
10045 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10046 {
10047         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10048             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10049             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10050            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10051             (tp->nvram_jedecnum == JEDEC_ATMEL))
10052
10053                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10054                         tp->nvram_pagesize) +
10055                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10056
10057         return addr;
10058 }
10059
/* Read one 32-bit word of NVRAM at linear @offset into *val.
 * Devices without an NVRAM interface fall back to the serial-EEPROM
 * state machine.  Otherwise: translate to physical addressing, take
 * the NVRAM arbitration lock, issue a one-word read command and
 * byte-swap the data register.  Returns 0 or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10091
10092 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10093 {
10094         int err;
10095         u32 tmp;
10096
10097         err = tg3_nvram_read(tp, offset, &tmp);
10098         *val = swab32(tmp);
10099         return err;
10100 }
10101
/* Write @len bytes from @buf to the serial EEPROM at @offset using the
 * GRC EEPROM state machine, one 32-bit word at a time.  Each word is
 * polled for completion for up to ~1 second.  Offset and length are
 * expected to be dword aligned.  Returns 0 or -EBUSY on timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr, data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

                val = tr32(GRC_EEPROM_ADDR);
                /* Write COMPLETE back first (presumably write-one-to-clear)
                 * before starting the next transaction — TODO confirm.
                 */
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10143
/* offset and length are dword aligned */
/* Write to unbuffered flash via read-modify-write of each affected
 * page: read the whole page into a bounce buffer, merge the caller's
 * data, then WREN + page-erase + WREN + program the page one word at
 * a time (FIRST on the first word, LAST on the final one).  Always
 * finishes by issuing WRDI (write disable).  Returns 0 or the first
 * error encountered.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                /* Start of the page containing the current offset. */
                phy_addr = offset & ~pagemask;

                /* Read the existing page contents into the bounce buffer. */
                for (j = 0; j < pagesize; j += 4) {
                        if ((ret = tg3_nvram_read(tp, phy_addr + j,
                                                (u32 *) (tmp + j))))
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                /* Merge the caller's data over the page image. */
                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Program the page back, one word at a time. */
                for (j = 0; j < pagesize; j += 4) {
                        u32 data;

                        data = *((u32 *) (tmp + j));
                        tw32(NVRAM_WRDATA, cpu_to_be32(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                                break;
                }
                if (ret)
                        break;
        }

        /* Leave the part write-disabled regardless of the outcome. */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
10239
/* offset and length are dword aligned */
/* Program buffered flash (or EEPROM) one 32-bit word at a time.
 * NVRAM_CMD_FIRST marks the start of a page or of the transfer, and
 * NVRAM_CMD_LAST the end of a page or of the transfer.  ST parts on
 * chips outside the 5752/5755/5787/5784/5761 group need an explicit
 * WREN before the first word of each page; EEPROM parts are always
 * written as complete FIRST|LAST words.  Returns 0 or the first error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 data, page_off, phy_addr, nvram_cmd;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, cpu_to_be32(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                if ((page_off == 0) || (i == 0))
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {

                        if ((ret = tg3_nvram_exec_cmd(tp,
                                NVRAM_CMD_WREN | NVRAM_CMD_GO |
                                NVRAM_CMD_DONE)))

                                break;
                }
                if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
                        break;
        }
        return ret;
}
10292
/* Top-level NVRAM block write: dispatches to the EEPROM, buffered-flash
 * or unbuffered-flash helper depending on the device's NVRAM type.
 * offset and length are dword aligned.  Returns 0 on success or a
 * negative/non-zero status from the underlying helper.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
        int ret;

        /* Temporarily deassert GPIO1 to lift the external EEPROM
         * write-protect while programming; restored below.
         */
        if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
                       ~GRC_LCLCTRL_GPIO_OUTPUT1);
                udelay(40);
        }

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
                ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
        }
        else {
                u32 grc_mode;

                ret = tg3_nvram_lock(tp);
                if (ret)
                        return ret;

                tg3_enable_nvram_access(tp);
                if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
                        tw32(NVRAM_WRITE1, 0x406);

                /* Enable NVRAM writes in GRC mode for the duration of
                 * the programming sequence.
                 */
                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

                if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
                        !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

                        ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                buf);
                }
                else {
                        ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                                buf);
                }

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);
        }

        /* Re-assert the EEPROM write-protect GPIO.
         * NOTE(review): the early "return ret" on tg3_nvram_lock()
         * failure above skips this restore — confirm that is intended.
         */
        if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(40);
        }

        return ret;
}
10347
/* Maps a board's PCI subsystem vendor/device ID pair to the PHY ID
 * expected on that board.  A phy_id of 0 marks a fiber/serdes board
 * with no copper PHY (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};
10352
/* Hard-coded subsystem-ID -> PHY ID table, consulted by tg3_phy_probe()
 * (via lookup_by_subsys()) when neither the PHY registers nor the
 * EEPROM yield a usable PHY ID.  Entries with phy_id 0 are fiber
 * (serdes) boards.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
        { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
        { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
        { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
        { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
        { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
        { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
        { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
        { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
        { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

        /* 3com boards. */
        { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
        { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
        { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
        { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
        { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

        /* DELL boards. */
        { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
        { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
        { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
        { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

        /* Compaq boards. */
        { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
        { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
        { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
        { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
        { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

        /* IBM boards. */
        { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10390
10391 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10392 {
10393         int i;
10394
10395         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10396                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10397                      tp->pdev->subsystem_vendor) &&
10398                     (subsys_id_to_phy_id[i].subsys_devid ==
10399                      tp->pdev->subsystem_device))
10400                         return &subsys_id_to_phy_id[i];
10401         }
10402         return NULL;
10403 }
10404
10405 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10406 {
10407         u32 val;
10408         u16 pmcsr;
10409
10410         /* On some early chips the SRAM cannot be accessed in D3hot state,
10411          * so need make sure we're in D0.
10412          */
10413         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10414         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10415         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10416         msleep(1);
10417
10418         /* Make sure register accesses (indirect or otherwise)
10419          * will function correctly.
10420          */
10421         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10422                                tp->misc_host_ctrl);
10423
10424         /* The memory arbiter has to be enabled in order for SRAM accesses
10425          * to succeed.  Normally on powerup the tg3 chip firmware will make
10426          * sure it is enabled, but other entities such as system netboot
10427          * code might disable it.
10428          */
10429         val = tr32(MEMARB_MODE);
10430         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10431
10432         tp->phy_id = PHY_ID_INVALID;
10433         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10434
10435         /* Assume an onboard device and WOL capable by default.  */
10436         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10437
10438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10439                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10440                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10441                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10442                 }
10443                 val = tr32(VCPU_CFGSHDW);
10444                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10445                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10446                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10447                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10448                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10449                 return;
10450         }
10451
10452         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10453         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10454                 u32 nic_cfg, led_cfg;
10455                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10456                 int eeprom_phy_serdes = 0;
10457
10458                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10459                 tp->nic_sram_data_cfg = nic_cfg;
10460
10461                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10462                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10463                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10464                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10465                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10466                     (ver > 0) && (ver < 0x100))
10467                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10468
10469                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10470                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10471                         eeprom_phy_serdes = 1;
10472
10473                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10474                 if (nic_phy_id != 0) {
10475                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10476                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10477
10478                         eeprom_phy_id  = (id1 >> 16) << 10;
10479                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10480                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10481                 } else
10482                         eeprom_phy_id = 0;
10483
10484                 tp->phy_id = eeprom_phy_id;
10485                 if (eeprom_phy_serdes) {
10486                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10487                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10488                         else
10489                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10490                 }
10491
10492                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10493                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10494                                     SHASTA_EXT_LED_MODE_MASK);
10495                 else
10496                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10497
10498                 switch (led_cfg) {
10499                 default:
10500                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10501                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10502                         break;
10503
10504                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10505                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10506                         break;
10507
10508                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10509                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10510
10511                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10512                          * read on some older 5700/5701 bootcode.
10513                          */
10514                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10515                             ASIC_REV_5700 ||
10516                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10517                             ASIC_REV_5701)
10518                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10519
10520                         break;
10521
10522                 case SHASTA_EXT_LED_SHARED:
10523                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10524                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10525                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10526                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10527                                                  LED_CTRL_MODE_PHY_2);
10528                         break;
10529
10530                 case SHASTA_EXT_LED_MAC:
10531                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10532                         break;
10533
10534                 case SHASTA_EXT_LED_COMBO:
10535                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10536                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10537                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10538                                                  LED_CTRL_MODE_PHY_2);
10539                         break;
10540
10541                 };
10542
10543                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10544                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10545                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10546                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10547
10548                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10549                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10550                         if ((tp->pdev->subsystem_vendor ==
10551                              PCI_VENDOR_ID_ARIMA) &&
10552                             (tp->pdev->subsystem_device == 0x205a ||
10553                              tp->pdev->subsystem_device == 0x2063))
10554                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10555                 } else {
10556                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10557                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10558                 }
10559
10560                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10561                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10562                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10563                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10564                 }
10565                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10566                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10567                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10568                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10569                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10570
10571                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10572                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10573                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10574
10575                 if (cfg2 & (1 << 17))
10576                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10577
10578                 /* serdes signal pre-emphasis in register 0x590 set by */
10579                 /* bootcode if bit 18 is set */
10580                 if (cfg2 & (1 << 18))
10581                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10582
10583                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10584                         u32 cfg3;
10585
10586                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10587                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10588                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10589                 }
10590         }
10591 }
10592
/* Determine the PHY ID and copper/serdes mode for this device.
 *
 * The ID is taken, in order of preference, from: the PHY's MII ID
 * registers, the value cached from the EEPROM by
 * tg3_get_eeprom_hw_cfg(), and finally the hard-coded subsystem-ID
 * table.  Returns 0 on success, -ENODEV if no PHY ID can be
 * determined, or an error from PHY reset/DSP init.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
            (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to either the hard-coded table based PHY_ID and failing
                 * that the value found in the eeprom area.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

                /* Pack the PHYSID1/PHYSID2 pair into the driver's
                 * internal PHY ID layout.
                 */
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
        }

        if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == PHY_ID_BCM8002)
                        tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                else
                        tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
        } else {
                if (tp->phy_id != PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = lookup_by_subsys(tp);
                        if (!p)
                                return -ENODEV;

                        tp->phy_id = p->phy_id;
                        if (!tp->phy_id ||
                            tp->phy_id == PHY_ID_BCM8002)
                                tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
                }
        }

        /* For copper PHYs not owned by ASF/APE firmware, make sure
         * autonegotiation advertises everything the link supports.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                u32 bmsr, adv_reg, tg3_ctrl, mask;

                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.  If link
                 * is already up, skip the disruptive PHY reset.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_100HALF | ADVERTISE_100FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                tg3_ctrl = 0;
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                        tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
                                    MII_TG3_CTRL_ADV_1000_FULL);
                        /* Early 5701 steppings must advertise master mode. */
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
                }

                mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                        ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
                if (!tg3_copper_is_advertising_all(tp, mask)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);

                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                                tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
                tg3_phy_set_wirespeed(tp);

                /* NOTE(review): these writes may repeat the conditional
                 * writes just above — presumably harmless re-programming;
                 * confirm intent before changing.
                 */
                tg3_writephy(tp, MII_ADVERTISE, adv_reg);
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
                        tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
        }

skip_phy_reset:
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;
        }

        /* NOTE(review): this repeats the 5401 DSP init done immediately
         * above (err is 0 here if we got past the return) — looks
         * redundant; confirm before removing.
         */
        if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
                err = tg3_init_5401phy_dsp(tp);
        }

        if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                tp->link_config.advertising =
                        (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg |
                         ADVERTISED_FIBRE);
        if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                tp->link_config.advertising &=
                        ~(ADVERTISED_1000baseT_Half |
                          ADVERTISED_1000baseT_Full);

        return err;
}
10720
/* Read the board part number from the VPD area into
 * tp->board_part_number.  The VPD bytes come either from a fixed NVRAM
 * offset (when the tg3 EEPROM magic is present) or via the PCI VPD
 * capability.  Falls back to a chip-based default string if the data
 * cannot be read or parsed.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
        unsigned char vpd_data[256];
        unsigned int i;
        u32 magic;

        if (tg3_nvram_read_swab(tp, 0x0, &magic))
                goto out_not_found;

        if (magic == TG3_EEPROM_MAGIC) {
                /* VPD image lives at fixed NVRAM offset 0x100. */
                for (i = 0; i < 256; i += 4) {
                        u32 tmp;

                        if (tg3_nvram_read(tp, 0x100 + i, &tmp))
                                goto out_not_found;

                        vpd_data[i + 0] = ((tmp >>  0) & 0xff);
                        vpd_data[i + 1] = ((tmp >>  8) & 0xff);
                        vpd_data[i + 2] = ((tmp >> 16) & 0xff);
                        vpd_data[i + 3] = ((tmp >> 24) & 0xff);
                }
        } else {
                int vpd_cap;

                /* No tg3 NVRAM signature: fetch the VPD through the PCI
                 * VPD capability instead.
                 */
                vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
                for (i = 0; i < 256; i += 4) {
                        u32 tmp, j = 0;
                        u16 tmp16;

                        pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
                                              i);
                        /* Poll up to 100ms for the device to set the
                         * data-ready flag (bit 15 of the VPD address reg).
                         */
                        while (j++ < 100) {
                                pci_read_config_word(tp->pdev, vpd_cap +
                                                     PCI_VPD_ADDR, &tmp16);
                                if (tmp16 & 0x8000)
                                        break;
                                msleep(1);
                        }
                        if (!(tmp16 & 0x8000))
                                goto out_not_found;

                        pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
                                              &tmp);
                        tmp = cpu_to_le32(tmp);
                        memcpy(&vpd_data[i], &tmp, 4);
                }
        }

        /* Now parse and find the part number. */
        for (i = 0; i < 254; ) {
                unsigned char val = vpd_data[i];
                unsigned int block_end;

                /* Skip identifier-string (0x82) and read-write (0x91)
                 * resources; a 16-bit little-endian length follows the
                 * tag byte.
                 */
                if (val == 0x82 || val == 0x91) {
                        i = (i + 3 +
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                        continue;
                }

                /* Anything but a read-only resource (0x90) here means
                 * the VPD is not in the layout we expect.
                 */
                if (val != 0x90)
                        goto out_not_found;

                block_end = (i + 3 +
                             (vpd_data[i + 1] +
                              (vpd_data[i + 2] << 8)));
                i += 3;

                if (block_end > 256)
                        goto out_not_found;

                /* Walk the keyword entries looking for "PN". */
                while (i < (block_end - 2)) {
                        if (vpd_data[i + 0] == 'P' &&
                            vpd_data[i + 1] == 'N') {
                                int partno_len = vpd_data[i + 2];

                                i += 3;
                                /* Bound the copy to both the destination
                                 * field (24) and the VPD buffer (256).
                                 */
                                if (partno_len > 24 || (partno_len + i) > 256)
                                        goto out_not_found;

                                memcpy(tp->board_part_number,
                                       &vpd_data[i], partno_len);

                                /* Success. */
                                return;
                        }
                        i += 3 + vpd_data[i + 2];
                }

                /* Part number not found. */
                goto out_not_found;
        }

out_not_found:
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                strcpy(tp->board_part_number, "BCM95906");
        else
                strcpy(tp->board_part_number, "none");
}
10820
10821 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10822 {
10823         u32 val;
10824
10825         if (tg3_nvram_read_swab(tp, offset, &val) ||
10826             (val & 0xfc000000) != 0x0c000000 ||
10827             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10828             val != 0)
10829                 return 0;
10830
10831         return 1;
10832 }
10833
10834 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10835 {
10836         u32 val, offset, start;
10837         u32 ver_offset;
10838         int i, bcnt;
10839
10840         if (tg3_nvram_read_swab(tp, 0, &val))
10841                 return;
10842
10843         if (val != TG3_EEPROM_MAGIC)
10844                 return;
10845
10846         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10847             tg3_nvram_read_swab(tp, 0x4, &start))
10848                 return;
10849
10850         offset = tg3_nvram_logical_addr(tp, offset);
10851
10852         if (!tg3_fw_img_is_valid(tp, offset) ||
10853             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10854                 return;
10855
10856         offset = offset + ver_offset - start;
10857         for (i = 0; i < 16; i += 4) {
10858                 if (tg3_nvram_read(tp, offset + i, &val))
10859                         return;
10860
10861                 val = le32_to_cpu(val);
10862                 memcpy(tp->fw_ver + i, &val, 4);
10863         }
10864
10865         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10866              (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
10867                 return;
10868
10869         for (offset = TG3_NVM_DIR_START;
10870              offset < TG3_NVM_DIR_END;
10871              offset += TG3_NVM_DIRENT_SIZE) {
10872                 if (tg3_nvram_read_swab(tp, offset, &val))
10873                         return;
10874
10875                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10876                         break;
10877         }
10878
10879         if (offset == TG3_NVM_DIR_END)
10880                 return;
10881
10882         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10883                 start = 0x08000000;
10884         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10885                 return;
10886
10887         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10888             !tg3_fw_img_is_valid(tp, offset) ||
10889             tg3_nvram_read_swab(tp, offset + 8, &val))
10890                 return;
10891
10892         offset += val - start;
10893
10894         bcnt = strlen(tp->fw_ver);
10895
10896         tp->fw_ver[bcnt++] = ',';
10897         tp->fw_ver[bcnt++] = ' ';
10898
10899         for (i = 0; i < 4; i++) {
10900                 if (tg3_nvram_read(tp, offset, &val))
10901                         return;
10902
10903                 val = le32_to_cpu(val);
10904                 offset += sizeof(val);
10905
10906                 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10907                         memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10908                         break;
10909                 }
10910
10911                 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10912                 bcnt += sizeof(val);
10913         }
10914
10915         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
10916 }
10917
10918 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10919
10920 static int __devinit tg3_get_invariants(struct tg3 *tp)
10921 {
10922         static struct pci_device_id write_reorder_chipsets[] = {
10923                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10924                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10925                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10926                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10927                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10928                              PCI_DEVICE_ID_VIA_8385_0) },
10929                 { },
10930         };
10931         u32 misc_ctrl_reg;
10932         u32 cacheline_sz_reg;
10933         u32 pci_state_reg, grc_misc_cfg;
10934         u32 val;
10935         u16 pci_cmd;
10936         int err, pcie_cap;
10937
10938         /* Force memory write invalidate off.  If we leave it on,
10939          * then on 5700_BX chips we have to enable a workaround.
10940          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10941          * to match the cacheline size.  The Broadcom driver have this
10942          * workaround but turns MWI off all the times so never uses
10943          * it.  This seems to suggest that the workaround is insufficient.
10944          */
10945         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10946         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10947         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10948
10949         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10950          * has the register indirect write enable bit set before
10951          * we try to access any of the MMIO registers.  It is also
10952          * critical that the PCI-X hw workaround situation is decided
10953          * before that as well.
10954          */
10955         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10956                               &misc_ctrl_reg);
10957
10958         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10959                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10961                 u32 prod_id_asic_rev;
10962
10963                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10964                                       &prod_id_asic_rev);
10965                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10966         }
10967
10968         /* Wrong chip ID in 5752 A0. This code can be removed later
10969          * as A0 is not in production.
10970          */
10971         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10972                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10973
10974         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10975          * we need to disable memory and use config. cycles
10976          * only to access all registers. The 5702/03 chips
10977          * can mistakenly decode the special cycles from the
10978          * ICH chipsets as memory write cycles, causing corruption
10979          * of register and memory space. Only certain ICH bridges
10980          * will drive special cycles with non-zero data during the
10981          * address phase which can fall within the 5703's address
10982          * range. This is not an ICH bug as the PCI spec allows
10983          * non-zero address during special cycles. However, only
10984          * these ICH bridges are known to drive non-zero addresses
10985          * during special cycles.
10986          *
10987          * Since special cycles do not cross PCI bridges, we only
10988          * enable this workaround if the 5703 is on the secondary
10989          * bus of these ICH bridges.
10990          */
10991         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10992             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10993                 static struct tg3_dev_id {
10994                         u32     vendor;
10995                         u32     device;
10996                         u32     rev;
10997                 } ich_chipsets[] = {
10998                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10999                           PCI_ANY_ID },
11000                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11001                           PCI_ANY_ID },
11002                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11003                           0xa },
11004                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11005                           PCI_ANY_ID },
11006                         { },
11007                 };
11008                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11009                 struct pci_dev *bridge = NULL;
11010
11011                 while (pci_id->vendor != 0) {
11012                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11013                                                 bridge);
11014                         if (!bridge) {
11015                                 pci_id++;
11016                                 continue;
11017                         }
11018                         if (pci_id->rev != PCI_ANY_ID) {
11019                                 if (bridge->revision > pci_id->rev)
11020                                         continue;
11021                         }
11022                         if (bridge->subordinate &&
11023                             (bridge->subordinate->number ==
11024                              tp->pdev->bus->number)) {
11025
11026                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11027                                 pci_dev_put(bridge);
11028                                 break;
11029                         }
11030                 }
11031         }
11032
11033         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11034          * DMA addresses > 40-bit. This bridge may have other additional
11035          * 57xx devices behind it in some 4-port NIC designs for example.
11036          * Any tg3 device found behind the bridge will also need the 40-bit
11037          * DMA workaround.
11038          */
11039         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11041                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11042                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11043                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11044         }
11045         else {
11046                 struct pci_dev *bridge = NULL;
11047
11048                 do {
11049                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11050                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11051                                                 bridge);
11052                         if (bridge && bridge->subordinate &&
11053                             (bridge->subordinate->number <=
11054                              tp->pdev->bus->number) &&
11055                             (bridge->subordinate->subordinate >=
11056                              tp->pdev->bus->number)) {
11057                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11058                                 pci_dev_put(bridge);
11059                                 break;
11060                         }
11061                 } while (bridge);
11062         }
11063
11064         /* Initialize misc host control in PCI block. */
11065         tp->misc_host_ctrl |= (misc_ctrl_reg &
11066                                MISC_HOST_CTRL_CHIPREV);
11067         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11068                                tp->misc_host_ctrl);
11069
11070         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11071                               &cacheline_sz_reg);
11072
11073         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11074         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11075         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11076         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11077
11078         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11079             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11080                 tp->pdev_peer = tg3_find_peer(tp);
11081
11082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11083             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11085             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11089             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11090                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11091
11092         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11093             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11094                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11095
11096         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11097                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11098                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11099                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11100                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11101                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11102                      tp->pdev_peer == tp->pdev))
11103                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11104
11105                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11106                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11107                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11108                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11109                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11110                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11111                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11112                 } else {
11113                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11114                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11115                                 ASIC_REV_5750 &&
11116                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11117                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11118                 }
11119         }
11120
11121         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11122             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11123             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11124             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11125             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11126             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11127             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11128             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11129                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11130
11131         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11132         if (pcie_cap != 0) {
11133                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11134                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11135                         u16 lnkctl;
11136
11137                         pci_read_config_word(tp->pdev,
11138                                              pcie_cap + PCI_EXP_LNKCTL,
11139                                              &lnkctl);
11140                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11141                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11142                 }
11143         }
11144
11145         /* If we have an AMD 762 or VIA K8T800 chipset, write
11146          * reordering to the mailbox registers done by the host
11147          * controller can cause major troubles.  We read back from
11148          * every mailbox register write to force the writes to be
11149          * posted to the chip in order.
11150          */
11151         if (pci_dev_present(write_reorder_chipsets) &&
11152             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11153                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11154
11155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11156             tp->pci_lat_timer < 64) {
11157                 tp->pci_lat_timer = 64;
11158
11159                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11160                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11161                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11162                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11163
11164                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11165                                        cacheline_sz_reg);
11166         }
11167
11168         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11169             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11170                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11171                 if (!tp->pcix_cap) {
11172                         printk(KERN_ERR PFX "Cannot find PCI-X "
11173                                             "capability, aborting.\n");
11174                         return -EIO;
11175                 }
11176         }
11177
11178         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11179                               &pci_state_reg);
11180
11181         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11182                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11183
11184                 /* If this is a 5700 BX chipset, and we are in PCI-X
11185                  * mode, enable register write workaround.
11186                  *
11187                  * The workaround is to use indirect register accesses
11188                  * for all chip writes not to mailbox registers.
11189                  */
11190                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11191                         u32 pm_reg;
11192
11193                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11194
11195                         /* The chip can have it's power management PCI config
11196                          * space registers clobbered due to this bug.
11197                          * So explicitly force the chip into D0 here.
11198                          */
11199                         pci_read_config_dword(tp->pdev,
11200                                               tp->pm_cap + PCI_PM_CTRL,
11201                                               &pm_reg);
11202                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11203                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11204                         pci_write_config_dword(tp->pdev,
11205                                                tp->pm_cap + PCI_PM_CTRL,
11206                                                pm_reg);
11207
11208                         /* Also, force SERR#/PERR# in PCI command. */
11209                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11210                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11211                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11212                 }
11213         }
11214
11215         /* 5700 BX chips need to have their TX producer index mailboxes
11216          * written twice to workaround a bug.
11217          */
11218         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11219                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11220
11221         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11222                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11223         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11224                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11225
11226         /* Chip-specific fixup from Broadcom driver */
11227         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11228             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11229                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11230                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11231         }
11232
11233         /* Default fast path register access methods */
11234         tp->read32 = tg3_read32;
11235         tp->write32 = tg3_write32;
11236         tp->read32_mbox = tg3_read32;
11237         tp->write32_mbox = tg3_write32;
11238         tp->write32_tx_mbox = tg3_write32;
11239         tp->write32_rx_mbox = tg3_write32;
11240
11241         /* Various workaround register access methods */
11242         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11243                 tp->write32 = tg3_write_indirect_reg32;
11244         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11245                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11246                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11247                 /*
11248                  * Back to back register writes can cause problems on these
11249                  * chips, the workaround is to read back all reg writes
11250                  * except those to mailbox regs.
11251                  *
11252                  * See tg3_write_indirect_reg32().
11253                  */
11254                 tp->write32 = tg3_write_flush_reg32;
11255         }
11256
11257
11258         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11259             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11260                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11261                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11262                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11263         }
11264
11265         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11266                 tp->read32 = tg3_read_indirect_reg32;
11267                 tp->write32 = tg3_write_indirect_reg32;
11268                 tp->read32_mbox = tg3_read_indirect_mbox;
11269                 tp->write32_mbox = tg3_write_indirect_mbox;
11270                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11271                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11272
11273                 iounmap(tp->regs);
11274                 tp->regs = NULL;
11275
11276                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11277                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11278                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11279         }
11280         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11281                 tp->read32_mbox = tg3_read32_mbox_5906;
11282                 tp->write32_mbox = tg3_write32_mbox_5906;
11283                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11284                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11285         }
11286
11287         if (tp->write32 == tg3_write_indirect_reg32 ||
11288             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11289              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11290               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11291                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11292
11293         /* Get eeprom hw config before calling tg3_set_power_state().
11294          * In particular, the TG3_FLG2_IS_NIC flag must be
11295          * determined before calling tg3_set_power_state() so that
11296          * we know whether or not to switch out of Vaux power.
11297          * When the flag is set, it means that GPIO1 is used for eeprom
11298          * write protect and also implies that it is a LOM where GPIOs
11299          * are not used to switch power.
11300          */
11301         tg3_get_eeprom_hw_cfg(tp);
11302
11303         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11304                 /* Allow reads and writes to the
11305                  * APE register and memory space.
11306                  */
11307                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11308                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11309                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11310                                        pci_state_reg);
11311         }
11312
11313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11315                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11316
11317         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11318          * GPIO1 driven high will bring 5700's external PHY out of reset.
11319          * It is also used as eeprom write protect on LOMs.
11320          */
11321         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11322         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11323             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11324                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11325                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11326         /* Unused GPIO3 must be driven as output on 5752 because there
11327          * are no pull-up resistors on unused GPIO pins.
11328          */
11329         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11330                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11331
11332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11333                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11334
11335         /* Force the chip into D0. */
11336         err = tg3_set_power_state(tp, PCI_D0);
11337         if (err) {
11338                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11339                        pci_name(tp->pdev));
11340                 return err;
11341         }
11342
11343         /* 5700 B0 chips do not support checksumming correctly due
11344          * to hardware bugs.
11345          */
11346         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11347                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11348
11349         /* Derive initial jumbo mode from MTU assigned in
11350          * ether_setup() via the alloc_etherdev() call
11351          */
11352         if (tp->dev->mtu > ETH_DATA_LEN &&
11353             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11354                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11355
11356         /* Determine WakeOnLan speed to use. */
11357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11358             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11359             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11360             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11361                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11362         } else {
11363                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11364         }
11365
11366         /* A few boards don't want Ethernet@WireSpeed phy feature */
11367         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11368             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11369              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11370              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11371             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11372             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11373                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11374
11375         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11376             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11377                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11378         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11379                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11380
11381         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11382                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11383                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11384                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11385                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11386                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11387                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11388                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11389                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11390                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11391                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11392                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11393         }
11394
11395         tp->coalesce_mode = 0;
11396         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11397             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11398                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11399
11400         /* Initialize MAC MI mode, polling disabled. */
11401         tw32_f(MAC_MI_MODE, tp->mi_mode);
11402         udelay(80);
11403
11404         /* Initialize data/descriptor byte/word swapping. */
11405         val = tr32(GRC_MODE);
11406         val &= GRC_MODE_HOST_STACKUP;
11407         tw32(GRC_MODE, val | tp->grc_mode);
11408
11409         tg3_switch_clocks(tp);
11410
11411         /* Clear this out for sanity. */
11412         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11413
11414         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11415                               &pci_state_reg);
11416         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11417             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11418                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11419
11420                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11421                     chiprevid == CHIPREV_ID_5701_B0 ||
11422                     chiprevid == CHIPREV_ID_5701_B2 ||
11423                     chiprevid == CHIPREV_ID_5701_B5) {
11424                         void __iomem *sram_base;
11425
11426                         /* Write some dummy words into the SRAM status block
11427                          * area, see if it reads back correctly.  If the return
11428                          * value is bad, force enable the PCIX workaround.
11429                          */
11430                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11431
11432                         writel(0x00000000, sram_base);
11433                         writel(0x00000000, sram_base + 4);
11434                         writel(0xffffffff, sram_base + 4);
11435                         if (readl(sram_base) != 0x00000000)
11436                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11437                 }
11438         }
11439
11440         udelay(50);
11441         tg3_nvram_init(tp);
11442
11443         grc_misc_cfg = tr32(GRC_MISC_CFG);
11444         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11445
11446         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11447             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11448              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11449                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11450
11451         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11452             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11453                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11454         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11455                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11456                                       HOSTCC_MODE_CLRTICK_TXBD);
11457
11458                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11459                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11460                                        tp->misc_host_ctrl);
11461         }
11462
11463         /* these are limited to 10/100 only */
11464         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11465              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11466             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11467              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11468              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11469               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11470               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11471             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11472              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11473               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11474               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11476                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11477
11478         err = tg3_phy_probe(tp);
11479         if (err) {
11480                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11481                        pci_name(tp->pdev), err);
11482                 /* ... but do not return immediately ... */
11483         }
11484
11485         tg3_read_partno(tp);
11486         tg3_read_fw_ver(tp);
11487
11488         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11489                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11490         } else {
11491                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11492                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11493                 else
11494                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11495         }
11496
11497         /* 5700 {AX,BX} chips have a broken status block link
11498          * change bit implementation, so we must use the
11499          * status register in those cases.
11500          */
11501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11502                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11503         else
11504                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11505
11506         /* The led_ctrl is set during tg3_phy_probe, here we might
11507          * have to force the link status polling mechanism based
11508          * upon subsystem IDs.
11509          */
11510         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11512             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11513                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11514                                   TG3_FLAG_USE_LINKCHG_REG);
11515         }
11516
11517         /* For all SERDES we poll the MAC status register. */
11518         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11519                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11520         else
11521                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11522
11523         /* All chips before 5787 can get confused if TX buffers
11524          * straddle the 4GB address boundary in some cases.
11525          */
11526         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11528             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11529             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11530             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11531                 tp->dev->hard_start_xmit = tg3_start_xmit;
11532         else
11533                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11534
11535         tp->rx_offset = 2;
11536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11537             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11538                 tp->rx_offset = 0;
11539
11540         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11541
11542         /* Increment the rx prod index on the rx std ring by at most
11543          * 8 for these chips to workaround hw errata.
11544          */
11545         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11547             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11548                 tp->rx_std_max_post = 8;
11549
11550         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11551                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11552                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11553
11554         return err;
11555 }
11556
11557 #ifdef CONFIG_SPARC
11558 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11559 {
11560         struct net_device *dev = tp->dev;
11561         struct pci_dev *pdev = tp->pdev;
11562         struct device_node *dp = pci_device_to_OF_node(pdev);
11563         const unsigned char *addr;
11564         int len;
11565
11566         addr = of_get_property(dp, "local-mac-address", &len);
11567         if (addr && len == 6) {
11568                 memcpy(dev->dev_addr, addr, 6);
11569                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11570                 return 0;
11571         }
11572         return -ENODEV;
11573 }
11574
11575 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11576 {
11577         struct net_device *dev = tp->dev;
11578
11579         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11580         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11581         return 0;
11582 }
11583 #endif
11584
11585 static int __devinit tg3_get_device_address(struct tg3 *tp)
11586 {
11587         struct net_device *dev = tp->dev;
11588         u32 hi, lo, mac_offset;
11589         int addr_ok = 0;
11590
11591 #ifdef CONFIG_SPARC
11592         if (!tg3_get_macaddr_sparc(tp))
11593                 return 0;
11594 #endif
11595
11596         mac_offset = 0x7c;
11597         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11598             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11599                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11600                         mac_offset = 0xcc;
11601                 if (tg3_nvram_lock(tp))
11602                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11603                 else
11604                         tg3_nvram_unlock(tp);
11605         }
11606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11607                 mac_offset = 0x10;
11608
11609         /* First try to get it from MAC address mailbox. */
11610         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11611         if ((hi >> 16) == 0x484b) {
11612                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11613                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11614
11615                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11616                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11617                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11618                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11619                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11620
11621                 /* Some old bootcode may report a 0 MAC address in SRAM */
11622                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11623         }
11624         if (!addr_ok) {
11625                 /* Next, try NVRAM. */
11626                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11627                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11628                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11629                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11630                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11631                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11632                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11633                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11634                 }
11635                 /* Finally just fetch it out of the MAC control regs. */
11636                 else {
11637                         hi = tr32(MAC_ADDR_0_HIGH);
11638                         lo = tr32(MAC_ADDR_0_LOW);
11639
11640                         dev->dev_addr[5] = lo & 0xff;
11641                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11642                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11643                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11644                         dev->dev_addr[1] = hi & 0xff;
11645                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11646                 }
11647         }
11648
11649         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11650 #ifdef CONFIG_SPARC64
11651                 if (!tg3_get_default_macaddr_sparc(tp))
11652                         return 0;
11653 #endif
11654                 return -EINVAL;
11655         }
11656         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11657         return 0;
11658 }
11659
11660 #define BOUNDARY_SINGLE_CACHELINE       1
11661 #define BOUNDARY_MULTI_CACHELINE        2
11662
11663 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11664 {
11665         int cacheline_size;
11666         u8 byte;
11667         int goal;
11668
11669         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11670         if (byte == 0)
11671                 cacheline_size = 1024;
11672         else
11673                 cacheline_size = (int) byte * 4;
11674
11675         /* On 5703 and later chips, the boundary bits have no
11676          * effect.
11677          */
11678         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11679             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11680             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11681                 goto out;
11682
11683 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11684         goal = BOUNDARY_MULTI_CACHELINE;
11685 #else
11686 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11687         goal = BOUNDARY_SINGLE_CACHELINE;
11688 #else
11689         goal = 0;
11690 #endif
11691 #endif
11692
11693         if (!goal)
11694                 goto out;
11695
11696         /* PCI controllers on most RISC systems tend to disconnect
11697          * when a device tries to burst across a cache-line boundary.
11698          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11699          *
11700          * Unfortunately, for PCI-E there are only limited
11701          * write-side controls for this, and thus for reads
11702          * we will still get the disconnects.  We'll also waste
11703          * these PCI cycles for both read and write for chips
11704          * other than 5700 and 5701 which do not implement the
11705          * boundary bits.
11706          */
11707         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11708             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11709                 switch (cacheline_size) {
11710                 case 16:
11711                 case 32:
11712                 case 64:
11713                 case 128:
11714                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11715                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11716                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11717                         } else {
11718                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11719                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11720                         }
11721                         break;
11722
11723                 case 256:
11724                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11725                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11726                         break;
11727
11728                 default:
11729                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11730                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11731                         break;
11732                 };
11733         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11734                 switch (cacheline_size) {
11735                 case 16:
11736                 case 32:
11737                 case 64:
11738                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11739                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11740                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11741                                 break;
11742                         }
11743                         /* fallthrough */
11744                 case 128:
11745                 default:
11746                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11747                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11748                         break;
11749                 };
11750         } else {
11751                 switch (cacheline_size) {
11752                 case 16:
11753                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11754                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11755                                         DMA_RWCTRL_WRITE_BNDRY_16);
11756                                 break;
11757                         }
11758                         /* fallthrough */
11759                 case 32:
11760                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11761                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11762                                         DMA_RWCTRL_WRITE_BNDRY_32);
11763                                 break;
11764                         }
11765                         /* fallthrough */
11766                 case 64:
11767                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11768                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11769                                         DMA_RWCTRL_WRITE_BNDRY_64);
11770                                 break;
11771                         }
11772                         /* fallthrough */
11773                 case 128:
11774                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11775                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11776                                         DMA_RWCTRL_WRITE_BNDRY_128);
11777                                 break;
11778                         }
11779                         /* fallthrough */
11780                 case 256:
11781                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11782                                 DMA_RWCTRL_WRITE_BNDRY_256);
11783                         break;
11784                 case 512:
11785                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11786                                 DMA_RWCTRL_WRITE_BNDRY_512);
11787                         break;
11788                 case 1024:
11789                 default:
11790                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11791                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11792                         break;
11793                 };
11794         }
11795
11796 out:
11797         return val;
11798 }
11799
/* Run one DMA transaction between host memory and NIC SRAM using a
 * hand-built internal buffer descriptor, to probe the DMA engine.
 *
 * @buf:       host test buffer (virtual address; unused here, kept for
 *             symmetry with the caller)
 * @buf_dma:   bus address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: non-zero for host->NIC (read-DMA engine), zero for
 *             NIC->host (write-DMA engine)
 *
 * Returns 0 when the completion FIFO reports our descriptor, -ENODEV
 * after ~4ms of polling without completion.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce DMA state: clear the completion FIFOs and engine
	 * status, disable the buffer manager, reset the FTQs.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer; 0x2100 is the NIC-side mbuf target. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the appropriate DMA FTQ with the descriptor's SRAM address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor: 40 x 100us. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11880
11881 #define TEST_BUFFER_SIZE        0x2000
11882
/* Tune and verify host DMA at probe time.
 *
 * Computes tp->dma_rwctrl (PCI command codes, watermarks, burst
 * boundaries) per chip family and bus type, writes it to the chip,
 * then -- on 5700/5701 only -- runs a write/read DMA loopback over a
 * TEST_BUFFER_SIZE host buffer to expose the 5700/5701 write-DMA
 * corruption bug, tightening the write boundary to 16 bytes if
 * corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the DMA test buffer cannot be
 * allocated, -ENODEV if DMA fails or corrupts data even at the
 * safest boundary setting.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes for DMA writes (0x7) and reads (0x6). */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: watermarks depend on chip family. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (bits reassigned on these chips). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write-DMA bug; skip the loopback
	 * test on everything else.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (word index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: tighten to a 16-byte write boundary
			 * and retry once; if already at 16, give up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12069
12070 static void __devinit tg3_init_link_config(struct tg3 *tp)
12071 {
12072         tp->link_config.advertising =
12073                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12074                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12075                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12076                  ADVERTISED_Autoneg | ADVERTISED_MII);
12077         tp->link_config.speed = SPEED_INVALID;
12078         tp->link_config.duplex = DUPLEX_INVALID;
12079         tp->link_config.autoneg = AUTONEG_ENABLE;
12080         tp->link_config.active_speed = SPEED_INVALID;
12081         tp->link_config.active_duplex = DUPLEX_INVALID;
12082         tp->link_config.phy_is_low_power = 0;
12083         tp->link_config.orig_speed = SPEED_INVALID;
12084         tp->link_config.orig_duplex = DUPLEX_INVALID;
12085         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12086 }
12087
/* Set buffer-manager watermark defaults in tp->bufmgr_config.
 *
 * 5705-and-later chips use their own (smaller) mbuf thresholds, with
 * 5780-class values for jumbo frames and an extra override for the
 * 5906; older chips use the original defaults.  The DMA low/high
 * watermarks are the same for every chip.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 has less internal memory; lower the RX thresholds. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* Common to all chips. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
12129
12130 static char * __devinit tg3_phy_string(struct tg3 *tp)
12131 {
12132         switch (tp->phy_id & PHY_ID_MASK) {
12133         case PHY_ID_BCM5400:    return "5400";
12134         case PHY_ID_BCM5401:    return "5401";
12135         case PHY_ID_BCM5411:    return "5411";
12136         case PHY_ID_BCM5701:    return "5701";
12137         case PHY_ID_BCM5703:    return "5703";
12138         case PHY_ID_BCM5704:    return "5704";
12139         case PHY_ID_BCM5705:    return "5705";
12140         case PHY_ID_BCM5750:    return "5750";
12141         case PHY_ID_BCM5752:    return "5752";
12142         case PHY_ID_BCM5714:    return "5714";
12143         case PHY_ID_BCM5780:    return "5780";
12144         case PHY_ID_BCM5755:    return "5755";
12145         case PHY_ID_BCM5787:    return "5787";
12146         case PHY_ID_BCM5784:    return "5784";
12147         case PHY_ID_BCM5756:    return "5722/5756";
12148         case PHY_ID_BCM5906:    return "5906";
12149         case PHY_ID_BCM5761:    return "5761";
12150         case PHY_ID_BCM8002:    return "8002/serdes";
12151         case 0:                 return "serdes";
12152         default:                return "unknown";
12153         };
12154 }
12155
12156 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12157 {
12158         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12159                 strcpy(str, "PCI Express");
12160                 return str;
12161         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12162                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12163
12164                 strcpy(str, "PCIX:");
12165
12166                 if ((clock_ctrl == 7) ||
12167                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12168                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12169                         strcat(str, "133MHz");
12170                 else if (clock_ctrl == 0)
12171                         strcat(str, "33MHz");
12172                 else if (clock_ctrl == 2)
12173                         strcat(str, "50MHz");
12174                 else if (clock_ctrl == 4)
12175                         strcat(str, "66MHz");
12176                 else if (clock_ctrl == 6)
12177                         strcat(str, "100MHz");
12178         } else {
12179                 strcpy(str, "PCI:");
12180                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12181                         strcat(str, "66MHz");
12182                 else
12183                         strcat(str, "33MHz");
12184         }
12185         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12186                 strcat(str, ":32-bit");
12187         else
12188                 strcat(str, ":64-bit");
12189         return str;
12190 }
12191
/* Locate the other PCI function of a dual-port device (e.g. 5704).
 *
 * Scans all 8 functions of this device's slot for a pci_dev other
 * than our own.  Returns tp->pdev itself when no peer exists (the
 * chip can be configured single-port).  NOTE: the returned device's
 * refcount is deliberately NOT held -- see comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drops the ref on our own function; no-op for NULL. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
12219
12220 static void __devinit tg3_init_coal(struct tg3 *tp)
12221 {
12222         struct ethtool_coalesce *ec = &tp->coal;
12223
12224         memset(ec, 0, sizeof(*ec));
12225         ec->cmd = ETHTOOL_GCOALESCE;
12226         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12227         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12228         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12229         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12230         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12231         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12232         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12233         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12234         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12235
12236         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12237                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12238                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12239                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12240                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12241                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12242         }
12243
12244         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12245                 ec->rx_coalesce_usecs_irq = 0;
12246                 ec->tx_coalesce_usecs_irq = 0;
12247                 ec->stats_block_coalesce_usecs = 0;
12248         }
12249 }
12250
12251 static int __devinit tg3_init_one(struct pci_dev *pdev,
12252                                   const struct pci_device_id *ent)
12253 {
12254         static int tg3_version_printed = 0;
12255         unsigned long tg3reg_base, tg3reg_len;
12256         struct net_device *dev;
12257         struct tg3 *tp;
12258         int i, err, pm_cap;
12259         char str[40];
12260         u64 dma_mask, persist_dma_mask;
12261
12262         if (tg3_version_printed++ == 0)
12263                 printk(KERN_INFO "%s", version);
12264
12265         err = pci_enable_device(pdev);
12266         if (err) {
12267                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12268                        "aborting.\n");
12269                 return err;
12270         }
12271
12272         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12273                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12274                        "base address, aborting.\n");
12275                 err = -ENODEV;
12276                 goto err_out_disable_pdev;
12277         }
12278
12279         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12280         if (err) {
12281                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12282                        "aborting.\n");
12283                 goto err_out_disable_pdev;
12284         }
12285
12286         pci_set_master(pdev);
12287
12288         /* Find power-management capability. */
12289         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12290         if (pm_cap == 0) {
12291                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12292                        "aborting.\n");
12293                 err = -EIO;
12294                 goto err_out_free_res;
12295         }
12296
12297         tg3reg_base = pci_resource_start(pdev, 0);
12298         tg3reg_len = pci_resource_len(pdev, 0);
12299
12300         dev = alloc_etherdev(sizeof(*tp));
12301         if (!dev) {
12302                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12303                 err = -ENOMEM;
12304                 goto err_out_free_res;
12305         }
12306
12307         SET_NETDEV_DEV(dev, &pdev->dev);
12308
12309 #if TG3_VLAN_TAG_USED
12310         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12311         dev->vlan_rx_register = tg3_vlan_rx_register;
12312 #endif
12313
12314         tp = netdev_priv(dev);
12315         tp->pdev = pdev;
12316         tp->dev = dev;
12317         tp->pm_cap = pm_cap;
12318         tp->mac_mode = TG3_DEF_MAC_MODE;
12319         tp->rx_mode = TG3_DEF_RX_MODE;
12320         tp->tx_mode = TG3_DEF_TX_MODE;
12321         tp->mi_mode = MAC_MI_MODE_BASE;
12322         if (tg3_debug > 0)
12323                 tp->msg_enable = tg3_debug;
12324         else
12325                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12326
12327         /* The word/byte swap controls here control register access byte
12328          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12329          * setting below.
12330          */
12331         tp->misc_host_ctrl =
12332                 MISC_HOST_CTRL_MASK_PCI_INT |
12333                 MISC_HOST_CTRL_WORD_SWAP |
12334                 MISC_HOST_CTRL_INDIR_ACCESS |
12335                 MISC_HOST_CTRL_PCISTATE_RW;
12336
12337         /* The NONFRM (non-frame) byte/word swap controls take effect
12338          * on descriptor entries, anything which isn't packet data.
12339          *
12340          * The StrongARM chips on the board (one for tx, one for rx)
12341          * are running in big-endian mode.
12342          */
12343         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12344                         GRC_MODE_WSWAP_NONFRM_DATA);
12345 #ifdef __BIG_ENDIAN
12346         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12347 #endif
12348         spin_lock_init(&tp->lock);
12349         spin_lock_init(&tp->indirect_lock);
12350         INIT_WORK(&tp->reset_task, tg3_reset_task);
12351
12352         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12353         if (!tp->regs) {
12354                 printk(KERN_ERR PFX "Cannot map device registers, "
12355                        "aborting.\n");
12356                 err = -ENOMEM;
12357                 goto err_out_free_dev;
12358         }
12359
12360         tg3_init_link_config(tp);
12361
12362         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12363         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12364         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12365
12366         dev->open = tg3_open;
12367         dev->stop = tg3_close;
12368         dev->get_stats = tg3_get_stats;
12369         dev->set_multicast_list = tg3_set_rx_mode;
12370         dev->set_mac_address = tg3_set_mac_addr;
12371         dev->do_ioctl = tg3_ioctl;
12372         dev->tx_timeout = tg3_tx_timeout;
12373         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12374         dev->ethtool_ops = &tg3_ethtool_ops;
12375         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12376         dev->change_mtu = tg3_change_mtu;
12377         dev->irq = pdev->irq;
12378 #ifdef CONFIG_NET_POLL_CONTROLLER
12379         dev->poll_controller = tg3_poll_controller;
12380 #endif
12381
12382         err = tg3_get_invariants(tp);
12383         if (err) {
12384                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12385                        "aborting.\n");
12386                 goto err_out_iounmap;
12387         }
12388
12389         /* The EPB bridge inside 5714, 5715, and 5780 and any
12390          * device behind the EPB cannot support DMA addresses > 40-bit.
12391          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12392          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12393          * do DMA address check in tg3_start_xmit().
12394          */
12395         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12396                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12397         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12398                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12399 #ifdef CONFIG_HIGHMEM
12400                 dma_mask = DMA_64BIT_MASK;
12401 #endif
12402         } else
12403                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12404
12405         /* Configure DMA attributes. */
12406         if (dma_mask > DMA_32BIT_MASK) {
12407                 err = pci_set_dma_mask(pdev, dma_mask);
12408                 if (!err) {
12409                         dev->features |= NETIF_F_HIGHDMA;
12410                         err = pci_set_consistent_dma_mask(pdev,
12411                                                           persist_dma_mask);
12412                         if (err < 0) {
12413                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12414                                        "DMA for consistent allocations\n");
12415                                 goto err_out_iounmap;
12416                         }
12417                 }
12418         }
12419         if (err || dma_mask == DMA_32BIT_MASK) {
12420                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12421                 if (err) {
12422                         printk(KERN_ERR PFX "No usable DMA configuration, "
12423                                "aborting.\n");
12424                         goto err_out_iounmap;
12425                 }
12426         }
12427
12428         tg3_init_bufmgr_config(tp);
12429
12430         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12431                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12432         }
12433         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12434             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12435             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12436             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12437             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12438                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12439         } else {
12440                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12441         }
12442
12443         /* TSO is on by default on chips that support hardware TSO.
12444          * Firmware TSO on older chips gives lower performance, so it
12445          * is off by default, but can be enabled using ethtool.
12446          */
12447         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12448                 dev->features |= NETIF_F_TSO;
12449                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12450                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12451                         dev->features |= NETIF_F_TSO6;
12452                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12453                         dev->features |= NETIF_F_TSO_ECN;
12454         }
12455
12456
12457         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12458             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12459             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12460                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12461                 tp->rx_pending = 63;
12462         }
12463
12464         err = tg3_get_device_address(tp);
12465         if (err) {
12466                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12467                        "aborting.\n");
12468                 goto err_out_iounmap;
12469         }
12470
12471         /*
12472          * Reset chip in case UNDI or EFI driver did not shutdown
12473          * DMA self test will enable WDMAC and we'll see (spurious)
12474          * pending DMA on the PCI bus at that point.
12475          */
12476         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12477             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12478                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12479                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12480         }
12481
12482         err = tg3_test_dma(tp);
12483         if (err) {
12484                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12485                 goto err_out_iounmap;
12486         }
12487
12488         /* Tigon3 can do ipv4 only... and some chips have buggy
12489          * checksumming.
12490          */
12491         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12492                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12493                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12494                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12495                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12496                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12497                         dev->features |= NETIF_F_IPV6_CSUM;
12498
12499                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12500         } else
12501                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12502
12503         /* flow control autonegotiation is default behavior */
12504         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12505
12506         tg3_init_coal(tp);
12507
12508         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12509                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12510                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12511                                "base address for APE, aborting.\n");
12512                         err = -ENODEV;
12513                         goto err_out_iounmap;
12514                 }
12515
12516                 tg3reg_base = pci_resource_start(pdev, 2);
12517                 tg3reg_len = pci_resource_len(pdev, 2);
12518
12519                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12520                 if (tp->aperegs == 0UL) {
12521                         printk(KERN_ERR PFX "Cannot map APE registers, "
12522                                "aborting.\n");
12523                         err = -ENOMEM;
12524                         goto err_out_iounmap;
12525                 }
12526
12527                 tg3_ape_lock_init(tp);
12528         }
12529
12530         pci_set_drvdata(pdev, dev);
12531
12532         err = register_netdev(dev);
12533         if (err) {
12534                 printk(KERN_ERR PFX "Cannot register net device, "
12535                        "aborting.\n");
12536                 goto err_out_apeunmap;
12537         }
12538
12539         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12540                dev->name,
12541                tp->board_part_number,
12542                tp->pci_chip_rev_id,
12543                tg3_phy_string(tp),
12544                tg3_bus_string(tp, str),
12545                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12546                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12547                  "10/100/1000Base-T")));
12548
12549         for (i = 0; i < 6; i++)
12550                 printk("%2.2x%c", dev->dev_addr[i],
12551                        i == 5 ? '\n' : ':');
12552
12553         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12554                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12555                dev->name,
12556                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12557                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12558                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12559                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12560                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12561                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12562         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12563                dev->name, tp->dma_rwctrl,
12564                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12565                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12566
12567         return 0;
12568
12569 err_out_apeunmap:
12570         if (tp->aperegs) {
12571                 iounmap(tp->aperegs);
12572                 tp->aperegs = NULL;
12573         }
12574
12575 err_out_iounmap:
12576         if (tp->regs) {
12577                 iounmap(tp->regs);
12578                 tp->regs = NULL;
12579         }
12580
12581 err_out_free_dev:
12582         free_netdev(dev);
12583
12584 err_out_free_res:
12585         pci_release_regions(pdev);
12586
12587 err_out_disable_pdev:
12588         pci_disable_device(pdev);
12589         pci_set_drvdata(pdev, NULL);
12590         return err;
12591 }
12592
12593 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12594 {
12595         struct net_device *dev = pci_get_drvdata(pdev);
12596
12597         if (dev) {
12598                 struct tg3 *tp = netdev_priv(dev);
12599
12600                 flush_scheduled_work();
12601                 unregister_netdev(dev);
12602                 if (tp->aperegs) {
12603                         iounmap(tp->aperegs);
12604                         tp->aperegs = NULL;
12605                 }
12606                 if (tp->regs) {
12607                         iounmap(tp->regs);
12608                         tp->regs = NULL;
12609                 }
12610                 free_netdev(dev);
12611                 pci_release_regions(pdev);
12612                 pci_disable_device(pdev);
12613                 pci_set_drvdata(pdev, NULL);
12614         }
12615 }
12616
/* PM suspend hook: quiesce the NIC and enter the low-power state
 * chosen from @state.  If the power transition fails, the hardware
 * is restarted so the interface remains usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Wait for any pending reset_task before stopping the interface. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip; mark init incomplete so a later open/resume
	 * knows a full re-init is required.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the hardware back up so
		 * the device stays functional, then report the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12668
/* PM resume hook: restore PCI config space, return the chip to full
 * power (D0) and re-initialize the hardware if the interface was up.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	/* Chip must be in D0 before any register access below. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12708
/* PCI driver glue: probe/remove and power-management entry points. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12717
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12722
/* Module exit point: unregister the driver; the PCI core invokes
 * tg3_remove_one() for each bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12727
/* Hook module load/unload to the init/cleanup routines above. */
module_init(tg3_init);
module_exit(tg3_cleanup);