/* Provenance (web-viewer artifact, preserved as a comment):
 * Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
 * [linux-2.6] / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "    /* printk message prefix */
#define DRV_MODULE_VERSION      "3.90"
#define DRV_MODULE_RELDATE      "April 12, 2008"

/* Reset-time defaults for the MAC/RX/TX mode registers. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Message classes enabled by default when tg3_debug == -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, for DMA allocation. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Ring index advance; relies on TG3_TX_RING_SIZE being a power of two. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* Receive buffer sizes: max frame plus the per-device RX offset plus
 * 64 bytes of slack.  Both expand tp from the enclosing scope.
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* number of entries in ethtool_test_keys[] below */
#define TG3_NUM_TEST            6
134
/* Banner printed once at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213         {}
214 };
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* Names reported for ETHTOOL_GSTATS.  The order of these entries is an
 * ABI with userspace ethtool and must match the order in which the
 * driver fills the u64 stats array — do not reorder.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
298
/* Names reported for ETHTOOL_GSTRINGS/self-test results; order must
 * match the result slots filled by the driver's self-test code.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
309
/* Plain MMIO write of a 32-bit register at byte offset @off. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* MMIO write into the APE (management processor) register window. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a register indirectly through PCI config space: program the
 * window base with the target offset, then write the data register.
 * indirect_lock serializes the two config-space accesses, which must
 * not be interleaved with another indirect access.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* MMIO write followed by a read-back of the same register to flush
 * the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
345
/* Read a register indirectly through PCI config space (counterpart of
 * tg3_write_indirect_reg32); indirect_lock keeps the window-base write
 * and data read atomic with respect to other indirect accesses.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
357
/* Write a mailbox register via PCI config space when direct MMIO is
 * not usable.  Two mailboxes (RX return consumer, standard producer)
 * have dedicated config-space aliases and are written directly; all
 * others go through the indirect register window at off + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
387
/* Read a mailbox register via the PCI config-space indirect window
 * (mailbox space starts at register offset 0x5600).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods: tp->write32 is already a
                 * non-posted access here, so no flush read is needed.
                 */
                tp->write32(tp, off, val);
        else {
                /* Posted method: write, optionally delay, then read back
                 * to force the write to the device.
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
424
/* Mailbox write followed by a flush read-back, except on chips where
 * the read must be skipped (write-reorder bug or ICH workaround).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
432
/* TX mailbox write with two chip-bug workarounds: some chips need the
 * value written twice (TXD_MBOX_HWBUG), and some need a read-back to
 * defeat write reordering before the doorbell takes effect.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
/* 5906 mailbox write into the GRC mailbox region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
/* Register/mailbox accessor shorthands.  These expand a local `tp` and
 * dispatch through per-device function pointers chosen at probe time
 * (direct MMIO, indirect config-space, or flush-on-write variants).
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
463
/* Write a word into NIC on-chip SRAM at @off through the memory window.
 * On the 5906 the stats-block region is not writable this way, so such
 * writes are silently dropped.  The window base is always restored to 0
 * afterwards, and indirect_lock keeps the base/data pair atomic.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a word from NIC on-chip SRAM at @off through the memory window
 * (counterpart of tg3_write_mem).  On the 5906 the stats-block region
 * is not accessible and reads back as 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
/* Release all 8 APE hardware locks (4 bytes apart in the grant block)
 * so the driver starts without any stale locks held from a previous
 * driver instance or crash.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Mask PCI interrupts at the host-control register, then write 1 to
 * interrupt mailbox 0 to deassert the line.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force an interrupt if status work may be pending: with untagged
 * status and an updated status block, assert an IRQ via GRC local
 * control; otherwise kick the coalescing engine to run "now".
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable interrupts: clear irq_sync (wmb orders it before the
 * register writes), unmask PCI interrupts, and ack the last status tag
 * in mailbox 0.  With 1-shot MSI the mailbox write is issued twice —
 * presumably a chip requirement; confirm against Broadcom errata.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Ack the last processed status tag; mmiowb() orders this MMIO
         * write before any subsequent writes from other CPUs.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the watchdog does not fire while
 * the queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
662
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated so pending events are
 * noticed, and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
674
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low 5 bits of the current value.  Chips with a CPMU or in the 5780
 * class manage clocks themselves and are left alone.  The intermediate
 * writes (core-clock / ALTCLK bits) and 40 us settle delays follow the
 * required hardware sequence — do not reorder.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Max MDIO busy-poll iterations (10 us each) before giving up. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg over the MII management interface into *val.
 * Auto-polling is temporarily disabled around the transaction (the MAC
 * would otherwise own the MDIO bus) and restored afterwards.
 * Returns 0 on success, -EBUSY if the transaction never completes.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Assemble the MI_COM frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears, then re-read once more after
         * a short settle delay to pick up the data bits.
         */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
757
/* Write @val to PHY register @reg over the MII management interface.
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * dropped (reported as success).  Auto-polling is suspended around the
 * transaction, as in tg3_readphy().  Returns 0 or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Assemble the MI_COM frame: PHY address, register, data, write. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
806
/* Write @val to PHY DSP register @reg: program the DSP address register
 * first, then the data port.  The two writes must stay in this order.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
812
/* Enable (@enable != 0) or disable automatic MDI/MDI-X crossover on
 * copper PHYs.  No-op on pre-5705 chips and on any SerDes config.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* 5906 EPHY: the MDIX bit lives behind a shadow bank.
		 * Expose the shadow, flip the bit, then restore the
		 * original test-register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Other PHYs: read-modify-write the AUXCTL misc shadow;
		 * the WREN bit must be set for the write-back to stick.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
850
/* Enable the PHY's "ethernet@wirespeed" feature via a read-modify-
 * write of the AUX_CTRL shadow selected by 0x7007.  Skipped when the
 * NVRAM config marks the feature unsupported.
 * (Bits 15 and 4 are set together here; exact per-bit semantics are
 * per Broadcom PHY documentation — not restated from this code.)
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
863
864 static int tg3_bmcr_reset(struct tg3 *tp)
865 {
866         u32 phy_control;
867         int limit, err;
868
869         /* OK, reset it, and poll the BMCR_RESET bit until it
870          * clears or we time out.
871          */
872         phy_control = BMCR_RESET;
873         err = tg3_writephy(tp, MII_BMCR, phy_control);
874         if (err != 0)
875                 return -EBUSY;
876
877         limit = 5000;
878         while (limit--) {
879                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880                 if (err != 0)
881                         return -EBUSY;
882
883                 if ((phy_control & BMCR_RESET) == 0) {
884                         udelay(40);
885                         break;
886                 }
887                 udelay(10);
888         }
889         if (limit <= 0)
890                 return -EBUSY;
891
892         return 0;
893 }
894
/* Program per-device PHY DSP trim coefficients from the one-time-
 * programmable (OTP) word cached in tp->phy_otp.  Each OTP field is
 * masked/shifted into place and written to its DSP register through
 * tg3_phydsp_write().  No-op if no OTP value was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target, combined with the register's default bits. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter trim and override. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
937
938 static int tg3_wait_macro_done(struct tg3 *tp)
939 {
940         int limit = 100;
941
942         while (limit--) {
943                 u32 tmp32;
944
945                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946                         if ((tmp32 & 0x1000) == 0)
947                                 break;
948                 }
949         }
950         if (limit <= 0)
951                 return -EBUSY;
952
953         return 0;
954 }
955
/* Write a known test pattern into each of the four DSP channels, read
 * it back through the macro interface, and verify it.  On a macro
 * timeout or a readback mismatch, *resetp is set to 1 so the caller
 * (tg3_phy_reset_5703_4_5) resets the PHY before retrying.
 *
 * Register 0x16 drives the DSP macro engine; the magic command values
 * (0x0002/0x0202/0x0082/0x0802) come from Broadcom's errata sequence
 * and are not otherwise documented here.
 *
 * Returns 0 when all channels verify clean, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and load the six
		 * pattern words through the auto-incrementing port.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the writes and wait for the macro engine. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Words come back in low/high pairs; mask to the bits
		 * that are actually significant before comparing.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the recovery values
				 * into DSP reg 0x000b, then fail.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1021
/* Clear the test pattern: write zeros to all six taps of each of the
 * four DSP channels and wait for each macro commit to complete.
 *
 * Returns 0 on success, -EBUSY if a macro operation times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1041
/* Errata workaround for 5703/5704/5705 PHYs: repeatedly run the DSP
 * test-pattern write/verify sequence (resetting the PHY between
 * failed attempts, up to 10 tries), then clear the pattern and
 * restore normal PHY operation.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if this read fails on every retry,
		 * phy9_orig is used uninitialized at the restore write
		 * below — confirm whether that path can occur.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Erase the test pattern regardless of verify outcome. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master-mode register saved before the test. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1117
1118 static void tg3_link_report(struct tg3 *);
1119
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Full PHY reset entry point: verifies the PHY responds, reports link
 * loss, dispatches to the 5703/4/5 errata sequence where needed, and
 * applies the long tail of per-chip workarounds before re-enabling
 * auto-MDIX and wirespeed.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	/* 5906: take the EPHY out of IDDQ (deep power-down) first. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice to make sure the PHY is responding. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops the link; tell the stack now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* These revs need the full test-pattern errata sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): park the CPMU 10MB-RX-only mode across the
	 * reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Undo the 12.5MHz MAC clock workaround if active. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Per-PHY erratum fixups; the DSP register values below are
	 * Broadcom-supplied magic for each bug.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1265
/* Configure the GPIO-driven auxiliary (Vaux) power switching.  On
 * dual-port devices (5704/5714) the GPIOs are shared between the two
 * functions, so the peer's WOL/ASF state is consulted before touching
 * them.  The GRC_LOCAL_CTRL writes are sequenced deliberately — the
 * order and the 100us waits (tw32_wait_f) matter.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only real NICs drive Vaux this way. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* If either port needs power for WOL or ASF, switch to Vaux. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer that finished init own the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power: release the GPIOs with
		 * a pulse on GPIO1 (not on 5700/5701, which do not use
		 * this scheme).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1361
1362 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1363 {
1364         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1365                 return 1;
1366         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1367                 if (speed != SPEED_10)
1368                         return 1;
1369         } else if (speed == SPEED_10)
1370                 return 1;
1371
1372         return 0;
1373 }
1374
1375 static int tg3_setup_phy(struct tg3 *, int);
1376
1377 #define RESET_KIND_SHUTDOWN     0
1378 #define RESET_KIND_INIT         1
1379 #define RESET_KIND_SUSPEND      2
1380
1381 static void tg3_write_sig_post_reset(struct tg3 *, int);
1382 static int tg3_halt_cpu(struct tg3 *, u32);
1383 static int tg3_nvram_lock(struct tg3 *);
1384 static void tg3_nvram_unlock(struct tg3 *);
1385
/* Put the PHY into its lowest safe power state before suspend.
 * SerDes links, the 5906 EPHY, and chips with power-down erratas
 * each take a different path; some chips must not be powered down
 * at all.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* 5704 SerDes: hand the link to HW autoneg and hold it
		 * in soft reset; other SerDes chips need nothing.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the EPHY, then drop it into IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the LEDs off and write the AUX_CTRL low-power
		 * value before (possibly) powering the PHY down.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Slow the 1000MB MAC clock to 12.5MHz while down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1433
1434 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1435 {
1436         u32 misc_host_ctrl;
1437         u16 power_control, power_caps;
1438         int pm = tp->pm_cap;
1439
1440         /* Make sure register accesses (indirect or otherwise)
1441          * will function correctly.
1442          */
1443         pci_write_config_dword(tp->pdev,
1444                                TG3PCI_MISC_HOST_CTRL,
1445                                tp->misc_host_ctrl);
1446
1447         pci_read_config_word(tp->pdev,
1448                              pm + PCI_PM_CTRL,
1449                              &power_control);
1450         power_control |= PCI_PM_CTRL_PME_STATUS;
1451         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1452         switch (state) {
1453         case PCI_D0:
1454                 power_control |= 0;
1455                 pci_write_config_word(tp->pdev,
1456                                       pm + PCI_PM_CTRL,
1457                                       power_control);
1458                 udelay(100);    /* Delay after power state change */
1459
1460                 /* Switch out of Vaux if it is a NIC */
1461                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1462                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1463
1464                 return 0;
1465
1466         case PCI_D1:
1467                 power_control |= 1;
1468                 break;
1469
1470         case PCI_D2:
1471                 power_control |= 2;
1472                 break;
1473
1474         case PCI_D3hot:
1475                 power_control |= 3;
1476                 break;
1477
1478         default:
1479                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1480                        "requested.\n",
1481                        tp->dev->name, state);
1482                 return -EINVAL;
1483         };
1484
1485         power_control |= PCI_PM_CTRL_PME_ENABLE;
1486
1487         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1488         tw32(TG3PCI_MISC_HOST_CTRL,
1489              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1490
1491         if (tp->link_config.phy_is_low_power == 0) {
1492                 tp->link_config.phy_is_low_power = 1;
1493                 tp->link_config.orig_speed = tp->link_config.speed;
1494                 tp->link_config.orig_duplex = tp->link_config.duplex;
1495                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1496         }
1497
1498         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1499                 tp->link_config.speed = SPEED_10;
1500                 tp->link_config.duplex = DUPLEX_HALF;
1501                 tp->link_config.autoneg = AUTONEG_ENABLE;
1502                 tg3_setup_phy(tp, 0);
1503         }
1504
1505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1506                 u32 val;
1507
1508                 val = tr32(GRC_VCPU_EXT_CTRL);
1509                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1510         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1511                 int i;
1512                 u32 val;
1513
1514                 for (i = 0; i < 200; i++) {
1515                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1516                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1517                                 break;
1518                         msleep(1);
1519                 }
1520         }
1521         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1522                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1523                                                      WOL_DRV_STATE_SHUTDOWN |
1524                                                      WOL_DRV_WOL |
1525                                                      WOL_SET_MAGIC_PKT);
1526
1527         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1528
1529         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1530                 u32 mac_mode;
1531
1532                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1533                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1534                         udelay(40);
1535
1536                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1537                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1538                         else
1539                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1540
1541                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1542                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1543                             ASIC_REV_5700) {
1544                                 u32 speed = (tp->tg3_flags &
1545                                              TG3_FLAG_WOL_SPEED_100MB) ?
1546                                              SPEED_100 : SPEED_10;
1547                                 if (tg3_5700_link_polarity(tp, speed))
1548                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1549                                 else
1550                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1551                         }
1552                 } else {
1553                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1554                 }
1555
1556                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1557                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1558
1559                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1560                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1561                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1562
1563                 tw32_f(MAC_MODE, mac_mode);
1564                 udelay(100);
1565
1566                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1567                 udelay(10);
1568         }
1569
1570         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1571             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1572              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1573                 u32 base_val;
1574
1575                 base_val = tp->pci_clock_ctrl;
1576                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1577                              CLOCK_CTRL_TXCLK_DISABLE);
1578
1579                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1580                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1581         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1582                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1583                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1584                 /* do nothing */
1585         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1586                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1587                 u32 newbits1, newbits2;
1588
1589                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1590                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1591                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1592                                     CLOCK_CTRL_TXCLK_DISABLE |
1593                                     CLOCK_CTRL_ALTCLK);
1594                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1595                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1596                         newbits1 = CLOCK_CTRL_625_CORE;
1597                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1598                 } else {
1599                         newbits1 = CLOCK_CTRL_ALTCLK;
1600                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1601                 }
1602
1603                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1604                             40);
1605
1606                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1607                             40);
1608
1609                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1610                         u32 newbits3;
1611
1612                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1613                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1614                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1615                                             CLOCK_CTRL_TXCLK_DISABLE |
1616                                             CLOCK_CTRL_44MHZ_CORE);
1617                         } else {
1618                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1619                         }
1620
1621                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1622                                     tp->pci_clock_ctrl | newbits3, 40);
1623                 }
1624         }
1625
1626         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1627             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1628             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1629                 tg3_power_down_phy(tp);
1630
1631         tg3_frob_aux_power(tp);
1632
1633         /* Workaround for unstable PLL clock */
1634         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1635             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1636                 u32 val = tr32(0x7d00);
1637
1638                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1639                 tw32(0x7d00, val);
1640                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1641                         int err;
1642
1643                         err = tg3_nvram_lock(tp);
1644                         tg3_halt_cpu(tp, RX_CPU_BASE);
1645                         if (!err)
1646                                 tg3_nvram_unlock(tp);
1647                 }
1648         }
1649
1650         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1651
1652         /* Finally, set the new power state. */
1653         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1654         udelay(100);    /* Delay after power state change */
1655
1656         return 0;
1657 }
1658
1659 static void tg3_link_report(struct tg3 *tp)
1660 {
1661         if (!netif_carrier_ok(tp->dev)) {
1662                 if (netif_msg_link(tp))
1663                         printk(KERN_INFO PFX "%s: Link is down.\n",
1664                                tp->dev->name);
1665         } else if (netif_msg_link(tp)) {
1666                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1667                        tp->dev->name,
1668                        (tp->link_config.active_speed == SPEED_1000 ?
1669                         1000 :
1670                         (tp->link_config.active_speed == SPEED_100 ?
1671                          100 : 10)),
1672                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1673                         "full" : "half"));
1674
1675                 printk(KERN_INFO PFX
1676                        "%s: Flow control is %s for TX and %s for RX.\n",
1677                        tp->dev->name,
1678                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1679                        "on" : "off",
1680                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1681                        "on" : "off");
1682         }
1683 }
1684
1685 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1686 {
1687         u16 miireg;
1688
1689         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1690                 miireg = ADVERTISE_PAUSE_CAP;
1691         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1692                 miireg = ADVERTISE_PAUSE_ASYM;
1693         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1694                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1695         else
1696                 miireg = 0;
1697
1698         return miireg;
1699 }
1700
1701 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1702 {
1703         u16 miireg;
1704
1705         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1706                 miireg = ADVERTISE_1000XPAUSE;
1707         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1708                 miireg = ADVERTISE_1000XPSE_ASYM;
1709         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1710                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1711         else
1712                 miireg = 0;
1713
1714         return miireg;
1715 }
1716
1717 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1718 {
1719         u8 cap = 0;
1720
1721         if (lcladv & ADVERTISE_PAUSE_CAP) {
1722                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1723                         if (rmtadv & LPA_PAUSE_CAP)
1724                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1725                         else if (rmtadv & LPA_PAUSE_ASYM)
1726                                 cap = TG3_FLOW_CTRL_RX;
1727                 } else {
1728                         if (rmtadv & LPA_PAUSE_CAP)
1729                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1730                 }
1731         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1732                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1733                         cap = TG3_FLOW_CTRL_TX;
1734         }
1735
1736         return cap;
1737 }
1738
1739 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1740 {
1741         u8 cap = 0;
1742
1743         if (lcladv & ADVERTISE_1000XPAUSE) {
1744                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1745                         if (rmtadv & LPA_1000XPAUSE)
1746                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1747                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1748                                 cap = TG3_FLOW_CTRL_RX;
1749                 } else {
1750                         if (rmtadv & LPA_1000XPAUSE)
1751                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1752                 }
1753         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1754                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1755                         cap = TG3_FLOW_CTRL_TX;
1756         }
1757
1758         return cap;
1759 }
1760
1761 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1762 {
1763         u8 new_tg3_flags = 0;
1764         u32 old_rx_mode = tp->rx_mode;
1765         u32 old_tx_mode = tp->tx_mode;
1766
1767         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1768             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1769                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1770                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1771                                                                    remote_adv);
1772                 else
1773                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1774                                                                    remote_adv);
1775         } else {
1776                 new_tg3_flags = tp->link_config.flowctrl;
1777         }
1778
1779         tp->link_config.active_flowctrl = new_tg3_flags;
1780
1781         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1782                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1783         else
1784                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1785
1786         if (old_rx_mode != tp->rx_mode) {
1787                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1788         }
1789
1790         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1791                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1792         else
1793                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1794
1795         if (old_tx_mode != tp->tx_mode) {
1796                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1797         }
1798 }
1799
1800 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1801 {
1802         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1803         case MII_TG3_AUX_STAT_10HALF:
1804                 *speed = SPEED_10;
1805                 *duplex = DUPLEX_HALF;
1806                 break;
1807
1808         case MII_TG3_AUX_STAT_10FULL:
1809                 *speed = SPEED_10;
1810                 *duplex = DUPLEX_FULL;
1811                 break;
1812
1813         case MII_TG3_AUX_STAT_100HALF:
1814                 *speed = SPEED_100;
1815                 *duplex = DUPLEX_HALF;
1816                 break;
1817
1818         case MII_TG3_AUX_STAT_100FULL:
1819                 *speed = SPEED_100;
1820                 *duplex = DUPLEX_FULL;
1821                 break;
1822
1823         case MII_TG3_AUX_STAT_1000HALF:
1824                 *speed = SPEED_1000;
1825                 *duplex = DUPLEX_HALF;
1826                 break;
1827
1828         case MII_TG3_AUX_STAT_1000FULL:
1829                 *speed = SPEED_1000;
1830                 *duplex = DUPLEX_FULL;
1831                 break;
1832
1833         default:
1834                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1835                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1836                                  SPEED_10;
1837                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1838                                   DUPLEX_HALF;
1839                         break;
1840                 }
1841                 *speed = SPEED_INVALID;
1842                 *duplex = DUPLEX_INVALID;
1843                 break;
1844         };
1845 }
1846
/* Program the copper PHY's advertisement registers according to
 * tp->link_config, then either force the requested speed/duplex (when
 * autoneg is disabled) or restart autonegotiation.  Three advertisement
 * cases: low-power/WoL mode, full autoneg, and a forced link mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100 Mbps advertised when WoL is configured to need it. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* Autonegotiating: advertise everything the configuration
		 * permits (SPEED_INVALID means "no forced speed").
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		/* Gigabit abilities live in the separate MII_TG3_CTRL register. */
		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts additionally force master mode
			 * (chip-rev specific workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* 10/100 forced: clear the gigabit control register below. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down (loopback) and wait for the
			 * link-status bit to clear before applying the new
			 * forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice: the link bit is latched,
				 * so the second read reflects current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (or restart) negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1984
1985 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1986 {
1987         int err;
1988
1989         /* Turn off tap power management. */
1990         /* Set Extended packet length bit */
1991         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1992
1993         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1994         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1995
1996         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1997         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1998
1999         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2000         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2001
2002         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2003         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2004
2005         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2006         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2007
2008         udelay(40);
2009
2010         return err;
2011 }
2012
2013 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2014 {
2015         u32 adv_reg, all_mask = 0;
2016
2017         if (mask & ADVERTISED_10baseT_Half)
2018                 all_mask |= ADVERTISE_10HALF;
2019         if (mask & ADVERTISED_10baseT_Full)
2020                 all_mask |= ADVERTISE_10FULL;
2021         if (mask & ADVERTISED_100baseT_Half)
2022                 all_mask |= ADVERTISE_100HALF;
2023         if (mask & ADVERTISED_100baseT_Full)
2024                 all_mask |= ADVERTISE_100FULL;
2025
2026         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2027                 return 0;
2028
2029         if ((adv_reg & all_mask) != all_mask)
2030                 return 0;
2031         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2032                 u32 tg3_ctrl;
2033
2034                 all_mask = 0;
2035                 if (mask & ADVERTISED_1000baseT_Half)
2036                         all_mask |= ADVERTISE_1000HALF;
2037                 if (mask & ADVERTISED_1000baseT_Full)
2038                         all_mask |= ADVERTISE_1000FULL;
2039
2040                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2041                         return 0;
2042
2043                 if ((tg3_ctrl & all_mask) != all_mask)
2044                         return 0;
2045         }
2046         return 1;
2047 }
2048
/* Verify that the PHY's pause advertisement matches the requested
 * flow-control configuration, reading the local advertisement into
 * *lcladv and (when applicable) the link partner ability into *rmtadv.
 *
 * Returns 0 only when the link is full duplex and the advertised pause
 * bits disagree with the request (caller must renegotiate).  Returns 1
 * otherwise — including when the initial PHY read fails (*lcladv is
 * then left unset) and in the half-duplex case, where a mismatched
 * advertisement is silently rewritten for future negotiations.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	/* Isolate the pause bits currently advertised vs. requested. */
	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		/* Pause is only meaningful on full duplex; fetch the
		 * partner's ability for later resolution.
		 */
		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
2081
/* Bring up (or re-evaluate) the link on a copper PHY.  Clears stale MAC
 * status, applies per-chip PHY workarounds, polls link/autoneg state,
 * programs MAC_MODE to match the resolved speed/duplex, and updates the
 * netdev carrier state.  @force_reset forces a PHY reset first.
 * Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear latched link-state status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Put the MI (MDIO) interface into its base mode (no auto-poll). */
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: reload the DSP workaround
			 * coefficients and wait for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus a
			 * second DSP reload if the link did not come back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt when using MI interrupts;
	 * otherwise mask everything (5906 is left untouched).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Enable bit 10 of AUX_CTRL shadow 0x4007 if not already
		 * set, then skip straight to relink to renegotiate.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link, reading BMSR twice per iteration (latched bit). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero AUX status, then decode speed/duplex. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a plausible BMCR value (0x7fff reads back on
		 * some failing accesses).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		/* Link is only accepted as "up" when the PHY state agrees
		 * with the configured autoneg / forced-mode settings.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	/* No usable link (or waking from low power): reprogram the PHY
	 * and re-check link status once.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Mirror the resolved speed/duplex into the MAC mode register. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: notify firmware via
	 * the NIC SRAM mailbox after clearing latched status.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate a link-state transition to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2338
/* Bookkeeping for the software 1000BASE-X auto-negotiation state
 * machine (driven by tg3_fiber_aneg_smachine() below).  The caller
 * zeroes the whole struct before starting a negotiation (see
 * fiber_autoneg()).
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control bits in, MR_LP_ADV_* results out */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per state-machine step,
	 * link_time records the tick at which the current wait began.
	 */
	unsigned long link_time, cur_time;

	/* Consecutive-sample tracking of the received config word:
	 * ability_match is set once the same non-zero word has been
	 * seen on more than one consecutive tick.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config code words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a settle-timer state waits before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
2402
2403 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2404                                    struct tg3_fiber_aneginfo *ap)
2405 {
2406         u16 flowctrl;
2407         unsigned long delta;
2408         u32 rx_cfg_reg;
2409         int ret;
2410
2411         if (ap->state == ANEG_STATE_UNKNOWN) {
2412                 ap->rxconfig = 0;
2413                 ap->link_time = 0;
2414                 ap->cur_time = 0;
2415                 ap->ability_match_cfg = 0;
2416                 ap->ability_match_count = 0;
2417                 ap->ability_match = 0;
2418                 ap->idle_match = 0;
2419                 ap->ack_match = 0;
2420         }
2421         ap->cur_time++;
2422
2423         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2424                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2425
2426                 if (rx_cfg_reg != ap->ability_match_cfg) {
2427                         ap->ability_match_cfg = rx_cfg_reg;
2428                         ap->ability_match = 0;
2429                         ap->ability_match_count = 0;
2430                 } else {
2431                         if (++ap->ability_match_count > 1) {
2432                                 ap->ability_match = 1;
2433                                 ap->ability_match_cfg = rx_cfg_reg;
2434                         }
2435                 }
2436                 if (rx_cfg_reg & ANEG_CFG_ACK)
2437                         ap->ack_match = 1;
2438                 else
2439                         ap->ack_match = 0;
2440
2441                 ap->idle_match = 0;
2442         } else {
2443                 ap->idle_match = 1;
2444                 ap->ability_match_cfg = 0;
2445                 ap->ability_match_count = 0;
2446                 ap->ability_match = 0;
2447                 ap->ack_match = 0;
2448
2449                 rx_cfg_reg = 0;
2450         }
2451
2452         ap->rxconfig = rx_cfg_reg;
2453         ret = ANEG_OK;
2454
2455         switch(ap->state) {
2456         case ANEG_STATE_UNKNOWN:
2457                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2458                         ap->state = ANEG_STATE_AN_ENABLE;
2459
2460                 /* fallthru */
2461         case ANEG_STATE_AN_ENABLE:
2462                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2463                 if (ap->flags & MR_AN_ENABLE) {
2464                         ap->link_time = 0;
2465                         ap->cur_time = 0;
2466                         ap->ability_match_cfg = 0;
2467                         ap->ability_match_count = 0;
2468                         ap->ability_match = 0;
2469                         ap->idle_match = 0;
2470                         ap->ack_match = 0;
2471
2472                         ap->state = ANEG_STATE_RESTART_INIT;
2473                 } else {
2474                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2475                 }
2476                 break;
2477
2478         case ANEG_STATE_RESTART_INIT:
2479                 ap->link_time = ap->cur_time;
2480                 ap->flags &= ~(MR_NP_LOADED);
2481                 ap->txconfig = 0;
2482                 tw32(MAC_TX_AUTO_NEG, 0);
2483                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2484                 tw32_f(MAC_MODE, tp->mac_mode);
2485                 udelay(40);
2486
2487                 ret = ANEG_TIMER_ENAB;
2488                 ap->state = ANEG_STATE_RESTART;
2489
2490                 /* fallthru */
2491         case ANEG_STATE_RESTART:
2492                 delta = ap->cur_time - ap->link_time;
2493                 if (delta > ANEG_STATE_SETTLE_TIME) {
2494                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2495                 } else {
2496                         ret = ANEG_TIMER_ENAB;
2497                 }
2498                 break;
2499
2500         case ANEG_STATE_DISABLE_LINK_OK:
2501                 ret = ANEG_DONE;
2502                 break;
2503
2504         case ANEG_STATE_ABILITY_DETECT_INIT:
2505                 ap->flags &= ~(MR_TOGGLE_TX);
2506                 ap->txconfig = ANEG_CFG_FD;
2507                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2508                 if (flowctrl & ADVERTISE_1000XPAUSE)
2509                         ap->txconfig |= ANEG_CFG_PS1;
2510                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2511                         ap->txconfig |= ANEG_CFG_PS2;
2512                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2513                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2514                 tw32_f(MAC_MODE, tp->mac_mode);
2515                 udelay(40);
2516
2517                 ap->state = ANEG_STATE_ABILITY_DETECT;
2518                 break;
2519
2520         case ANEG_STATE_ABILITY_DETECT:
2521                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2522                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2523                 }
2524                 break;
2525
2526         case ANEG_STATE_ACK_DETECT_INIT:
2527                 ap->txconfig |= ANEG_CFG_ACK;
2528                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2529                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2530                 tw32_f(MAC_MODE, tp->mac_mode);
2531                 udelay(40);
2532
2533                 ap->state = ANEG_STATE_ACK_DETECT;
2534
2535                 /* fallthru */
2536         case ANEG_STATE_ACK_DETECT:
2537                 if (ap->ack_match != 0) {
2538                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2539                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2540                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2541                         } else {
2542                                 ap->state = ANEG_STATE_AN_ENABLE;
2543                         }
2544                 } else if (ap->ability_match != 0 &&
2545                            ap->rxconfig == 0) {
2546                         ap->state = ANEG_STATE_AN_ENABLE;
2547                 }
2548                 break;
2549
2550         case ANEG_STATE_COMPLETE_ACK_INIT:
2551                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2552                         ret = ANEG_FAILED;
2553                         break;
2554                 }
2555                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2556                                MR_LP_ADV_HALF_DUPLEX |
2557                                MR_LP_ADV_SYM_PAUSE |
2558                                MR_LP_ADV_ASYM_PAUSE |
2559                                MR_LP_ADV_REMOTE_FAULT1 |
2560                                MR_LP_ADV_REMOTE_FAULT2 |
2561                                MR_LP_ADV_NEXT_PAGE |
2562                                MR_TOGGLE_RX |
2563                                MR_NP_RX);
2564                 if (ap->rxconfig & ANEG_CFG_FD)
2565                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2566                 if (ap->rxconfig & ANEG_CFG_HD)
2567                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2568                 if (ap->rxconfig & ANEG_CFG_PS1)
2569                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2570                 if (ap->rxconfig & ANEG_CFG_PS2)
2571                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2572                 if (ap->rxconfig & ANEG_CFG_RF1)
2573                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2574                 if (ap->rxconfig & ANEG_CFG_RF2)
2575                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2576                 if (ap->rxconfig & ANEG_CFG_NP)
2577                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2578
2579                 ap->link_time = ap->cur_time;
2580
2581                 ap->flags ^= (MR_TOGGLE_TX);
2582                 if (ap->rxconfig & 0x0008)
2583                         ap->flags |= MR_TOGGLE_RX;
2584                 if (ap->rxconfig & ANEG_CFG_NP)
2585                         ap->flags |= MR_NP_RX;
2586                 ap->flags |= MR_PAGE_RX;
2587
2588                 ap->state = ANEG_STATE_COMPLETE_ACK;
2589                 ret = ANEG_TIMER_ENAB;
2590                 break;
2591
2592         case ANEG_STATE_COMPLETE_ACK:
2593                 if (ap->ability_match != 0 &&
2594                     ap->rxconfig == 0) {
2595                         ap->state = ANEG_STATE_AN_ENABLE;
2596                         break;
2597                 }
2598                 delta = ap->cur_time - ap->link_time;
2599                 if (delta > ANEG_STATE_SETTLE_TIME) {
2600                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2601                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2602                         } else {
2603                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2604                                     !(ap->flags & MR_NP_RX)) {
2605                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2606                                 } else {
2607                                         ret = ANEG_FAILED;
2608                                 }
2609                         }
2610                 }
2611                 break;
2612
2613         case ANEG_STATE_IDLE_DETECT_INIT:
2614                 ap->link_time = ap->cur_time;
2615                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2616                 tw32_f(MAC_MODE, tp->mac_mode);
2617                 udelay(40);
2618
2619                 ap->state = ANEG_STATE_IDLE_DETECT;
2620                 ret = ANEG_TIMER_ENAB;
2621                 break;
2622
2623         case ANEG_STATE_IDLE_DETECT:
2624                 if (ap->ability_match != 0 &&
2625                     ap->rxconfig == 0) {
2626                         ap->state = ANEG_STATE_AN_ENABLE;
2627                         break;
2628                 }
2629                 delta = ap->cur_time - ap->link_time;
2630                 if (delta > ANEG_STATE_SETTLE_TIME) {
2631                         /* XXX another gem from the Broadcom driver :( */
2632                         ap->state = ANEG_STATE_LINK_OK;
2633                 }
2634                 break;
2635
2636         case ANEG_STATE_LINK_OK:
2637                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2638                 ret = ANEG_DONE;
2639                 break;
2640
2641         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2642                 /* ??? unimplemented */
2643                 break;
2644
2645         case ANEG_STATE_NEXT_PAGE_WAIT:
2646                 /* ??? unimplemented */
2647                 break;
2648
2649         default:
2650                 ret = ANEG_FAILED;
2651                 break;
2652         };
2653
2654         return ret;
2655 }
2656
2657 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2658 {
2659         int res = 0;
2660         struct tg3_fiber_aneginfo aninfo;
2661         int status = ANEG_FAILED;
2662         unsigned int tick;
2663         u32 tmp;
2664
2665         tw32_f(MAC_TX_AUTO_NEG, 0);
2666
2667         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2668         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2669         udelay(40);
2670
2671         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2672         udelay(40);
2673
2674         memset(&aninfo, 0, sizeof(aninfo));
2675         aninfo.flags |= MR_AN_ENABLE;
2676         aninfo.state = ANEG_STATE_UNKNOWN;
2677         aninfo.cur_time = 0;
2678         tick = 0;
2679         while (++tick < 195000) {
2680                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2681                 if (status == ANEG_DONE || status == ANEG_FAILED)
2682                         break;
2683
2684                 udelay(1);
2685         }
2686
2687         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2688         tw32_f(MAC_MODE, tp->mac_mode);
2689         udelay(40);
2690
2691         *txflags = aninfo.txconfig;
2692         *rxflags = aninfo.flags;
2693
2694         if (status == ANEG_DONE &&
2695             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2696                              MR_LP_ADV_FULL_DUPLEX)))
2697                 res = 1;
2698
2699         return res;
2700 }
2701
2702 static void tg3_init_bcm8002(struct tg3 *tp)
2703 {
2704         u32 mac_status = tr32(MAC_STATUS);
2705         int i;
2706
2707         /* Reset when initting first time or we have a link. */
2708         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2709             !(mac_status & MAC_STATUS_PCS_SYNCED))
2710                 return;
2711
2712         /* Set PLL lock range. */
2713         tg3_writephy(tp, 0x16, 0x8007);
2714
2715         /* SW reset */
2716         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2717
2718         /* Wait for reset to complete. */
2719         /* XXX schedule_timeout() ... */
2720         for (i = 0; i < 500; i++)
2721                 udelay(10);
2722
2723         /* Config mode; select PMA/Ch 1 regs. */
2724         tg3_writephy(tp, 0x10, 0x8411);
2725
2726         /* Enable auto-lock and comdet, select txclk for tx. */
2727         tg3_writephy(tp, 0x11, 0x0a10);
2728
2729         tg3_writephy(tp, 0x18, 0x00a0);
2730         tg3_writephy(tp, 0x16, 0x41ff);
2731
2732         /* Assert and deassert POR. */
2733         tg3_writephy(tp, 0x13, 0x0400);
2734         udelay(40);
2735         tg3_writephy(tp, 0x13, 0x0000);
2736
2737         tg3_writephy(tp, 0x11, 0x0a50);
2738         udelay(40);
2739         tg3_writephy(tp, 0x11, 0x0a10);
2740
2741         /* Wait for signal to stabilize */
2742         /* XXX schedule_timeout() ... */
2743         for (i = 0; i < 15000; i++)
2744                 udelay(10);
2745
2746         /* Deselect the channel register so we can read the PHYID
2747          * later.
2748          */
2749         tg3_writephy(tp, 0x10, 0x8011);
2750 }
2751
/* Fiber link setup for chips whose SG-DIG block performs 1000BASE-X
 * auto-negotiation in hardware.
 *
 * @tp: device private state.
 * @mac_status: caller's snapshot of the MAC_STATUS register.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* The MAC_SERDES_CFG workaround below applies to every rev
	 * except 5704 A0/A1.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced speed/duplex: take SG-DIG out of hardware
		 * autoneg if it was enabled, then declare link up as
		 * soon as the PCS reports sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If parallel detection previously brought the link up
		 * and we still have PCS sync with no incoming config
		 * words, keep the link up while the serdes counter
		 * runs down rather than restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* (Re)start hardware autoneg via a SG_DIG soft reset. */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive flow control from
			 * our advertisement and the partner's.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg still pending; once the timeout
			 * counter expires, fall back to parallel
			 * detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2893
2894 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2895 {
2896         int current_link_up = 0;
2897
2898         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2899                 goto out;
2900
2901         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2902                 u32 txflags, rxflags;
2903                 int i;
2904
2905                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2906                         u32 local_adv = 0, remote_adv = 0;
2907
2908                         if (txflags & ANEG_CFG_PS1)
2909                                 local_adv |= ADVERTISE_1000XPAUSE;
2910                         if (txflags & ANEG_CFG_PS2)
2911                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2912
2913                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
2914                                 remote_adv |= LPA_1000XPAUSE;
2915                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2916                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2917
2918                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2919
2920                         current_link_up = 1;
2921                 }
2922                 for (i = 0; i < 30; i++) {
2923                         udelay(20);
2924                         tw32_f(MAC_STATUS,
2925                                (MAC_STATUS_SYNC_CHANGED |
2926                                 MAC_STATUS_CFG_CHANGED));
2927                         udelay(40);
2928                         if ((tr32(MAC_STATUS) &
2929                              (MAC_STATUS_SYNC_CHANGED |
2930                               MAC_STATUS_CFG_CHANGED)) == 0)
2931                                 break;
2932                 }
2933
2934                 mac_status = tr32(MAC_STATUS);
2935                 if (current_link_up == 0 &&
2936                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2937                     !(mac_status & MAC_STATUS_RCVD_CFG))
2938                         current_link_up = 1;
2939         } else {
2940                 tg3_setup_flow_control(tp, 0, 0);
2941
2942                 /* Forcing 1000FD link up. */
2943                 current_link_up = 1;
2944
2945                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2946                 udelay(40);
2947
2948                 tw32_f(MAC_MODE, tp->mac_mode);
2949                 udelay(40);
2950         }
2951
2952 out:
2953         return current_link_up;
2954 }
2955
/* Top-level link (re)configuration for TBI/fiber devices.  Chooses
 * between the SG-DIG hardware autoneg path and the software state
 * machine, then updates carrier state and the link LED.
 *
 * force_reset is unused in this function.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the current link parameters so we can report only
	 * real changes at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg device, carrier already up and
	 * initialized, status bits showing a clean synced link -- just
	 * ack the change bits and keep the current configuration.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated, clearing any stale latched
	 * link-change indication.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Flush latched sync/config/link-change events (up to 100
	 * rounds).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	/* If PCS sync was lost, the link is down; with autoneg on and
	 * the serdes timer expired, briefly assert SEND_CONFIGS --
	 * presumably to nudge the partner into renegotiating (confirm).
	 */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Record the result and drive the link LED accordingly;
	 * fiber links are always 1000 Mb/s full duplex when up.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or a silent parameter change. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3063
/* Link setup for fiber boards whose SERDES is reached through MII
 * registers (e.g. 5714S/5715S).  Handles both autoneg (1000Base-X
 * code words) and forced mode, then programs the MAC to match the
 * negotiated duplex and reports carrier changes.
 *
 * Returns 0 on success, or the OR of tg3_readphy() error codes.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	/* MAC talks GMII to this SERDES part. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any latched link-state attention bits before we probe. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low: read twice so bmsr reflects
	 * the current state, not a stale link-down event.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status is the authoritative
		 * link indication; override the PHY's BMSR bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
	     tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000Base-X advertisement register from the
		 * requested flow control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Only (re)start autoneg if the advertisement changed or
		 * autoneg was off; then wait for completion via the
		 * serdes_counter timer path instead of polling here.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: disable autoneg and force the duplex. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Strip the advertisement and restart
				 * autoneg so the peer drops the link
				 * before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched BMSR (twice) after forcing. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		/* Fiber link is always gigabit. */
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the link partner's ability;
			 * no common 1000X mode means autoneg failed.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* Program the MAC's duplex to match the link. */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3234
/* Parallel detection fallback for SERDES links.  If autoneg has not
 * produced a link after the serdes_counter grace period, but we have
 * signal detect and are not receiving config code words, force the
 * link up at 1000/full (parallel detect).  Conversely, if a forced
 * parallel-detect link later starts receiving config words, hand the
 * link back to autoneg.  Called periodically from the driver timer.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice: first read clears latched state. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3292
/* Top-level PHY/link bring-up.  Dispatches to the fiber, MII-SERDES,
 * or copper setup routine based on board flags, then applies chip-wide
 * fixups that depend on the resulting link state: clock prescaler on
 * 5784 A0/A1, half-duplex TX slot timing, statistics coalescing, and
 * the ASPM power-management threshold workaround.
 *
 * Returns the error code from the PHY-specific setup routine.
 * Caller must hold tp->lock.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Rescale the GRC timer prescaler to match the current
		 * MAC clock frequency reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half needs a longer slot time (0xff) for collisions. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only collect stats while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		/* Relax the PCIe L1 entry threshold while link is down;
		 * force it high while the link is up.
		 */
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3355
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active, or mailbox
	 * writes already go through the indirect path, this symptom
	 * should be impossible -- treat it as a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the reset task
	 * will perform the actual chip reset.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3376
3377 static inline u32 tg3_tx_avail(struct tg3 *tp)
3378 {
3379         smp_mb();
3380         return (tp->tx_pending -
3381                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3382 }
3383
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* hw_idx: chip's completion pointer; sw_idx: first descriptor
	 * not yet reclaimed by the driver.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A "completed" slot with no skb means chip and driver
		 * disagree about the ring -- suspected MMIO reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear portion (first descriptor). */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each page fragment consumed one more descriptor. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* An occupied slot or running past hw_idx
			 * mid-packet is the same reordering symptom.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock to close the race with a
		 * concurrent tg3_start_xmit() stopping the queue.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3451
3452 /* Returns size of skb allocated or < 0 on error.
3453  *
3454  * We only need to fill in the address because the other members
3455  * of the RX descriptor are invariant, see tg3_init_rings.
3456  *
3457  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3458  * posting buffers we only dirty the first cache line of the RX
3459  * descriptor (containing the address).  Whereas for the RX status
3460  * buffers the cpu only reads the last cacheline of the RX descriptor
3461  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3462  */
3463 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3464                             int src_idx, u32 dest_idx_unmasked)
3465 {
3466         struct tg3_rx_buffer_desc *desc;
3467         struct ring_info *map, *src_map;
3468         struct sk_buff *skb;
3469         dma_addr_t mapping;
3470         int skb_size, dest_idx;
3471
3472         src_map = NULL;
3473         switch (opaque_key) {
3474         case RXD_OPAQUE_RING_STD:
3475                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3476                 desc = &tp->rx_std[dest_idx];
3477                 map = &tp->rx_std_buffers[dest_idx];
3478                 if (src_idx >= 0)
3479                         src_map = &tp->rx_std_buffers[src_idx];
3480                 skb_size = tp->rx_pkt_buf_sz;
3481                 break;
3482
3483         case RXD_OPAQUE_RING_JUMBO:
3484                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3485                 desc = &tp->rx_jumbo[dest_idx];
3486                 map = &tp->rx_jumbo_buffers[dest_idx];
3487                 if (src_idx >= 0)
3488                         src_map = &tp->rx_jumbo_buffers[src_idx];
3489                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3490                 break;
3491
3492         default:
3493                 return -EINVAL;
3494         };
3495
3496         /* Do not overwrite any of the map or rp information
3497          * until we are sure we can commit to a new buffer.
3498          *
3499          * Callers depend upon this behavior and assume that
3500          * we leave everything unchanged if we fail.
3501          */
3502         skb = netdev_alloc_skb(tp->dev, skb_size);
3503         if (skb == NULL)
3504                 return -ENOMEM;
3505
3506         skb_reserve(skb, tp->rx_offset);
3507
3508         mapping = pci_map_single(tp->pdev, skb->data,
3509                                  skb_size - tp->rx_offset,
3510                                  PCI_DMA_FROMDEVICE);
3511
3512         map->skb = skb;
3513         pci_unmap_addr_set(map, mapping, mapping);
3514
3515         if (src_map != NULL)
3516                 src_map->skb = NULL;
3517
3518         desc->addr_hi = ((u64)mapping >> 32);
3519         desc->addr_lo = ((u64)mapping & 0xffffffff);
3520
3521         return skb_size;
3522 }
3523
3524 /* We only need to move over in the address because the other
3525  * members of the RX descriptor are invariant.  See notes above
3526  * tg3_alloc_rx_skb for full details.
3527  */
3528 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3529                            int src_idx, u32 dest_idx_unmasked)
3530 {
3531         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3532         struct ring_info *src_map, *dest_map;
3533         int dest_idx;
3534
3535         switch (opaque_key) {
3536         case RXD_OPAQUE_RING_STD:
3537                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3538                 dest_desc = &tp->rx_std[dest_idx];
3539                 dest_map = &tp->rx_std_buffers[dest_idx];
3540                 src_desc = &tp->rx_std[src_idx];
3541                 src_map = &tp->rx_std_buffers[src_idx];
3542                 break;
3543
3544         case RXD_OPAQUE_RING_JUMBO:
3545                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3546                 dest_desc = &tp->rx_jumbo[dest_idx];
3547                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3548                 src_desc = &tp->rx_jumbo[src_idx];
3549                 src_map = &tp->rx_jumbo_buffers[src_idx];
3550                 break;
3551
3552         default:
3553                 return;
3554         };
3555
3556         dest_map->skb = src_map->skb;
3557         pci_unmap_addr_set(dest_map, mapping,
3558                            pci_unmap_addr(src_map, mapping));
3559         dest_desc->addr_hi = src_desc->addr_hi;
3560         dest_desc->addr_lo = src_desc->addr_lo;
3561
3562         src_map->skb = NULL;
3563 }
3564
#if TG3_VLAN_TAG_USED
/* Deliver an skb with a hardware-stripped VLAN tag through the
 * VLAN acceleration receive path.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3571
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which posting ring this
		 * buffer came from and its index in that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (except the benign odd-nibble MII
		 * indication) and recycle the buffer back to the chip.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large frame: hand the original buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a new skb and recycle
			 * the original DMA buffer back to the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* +2 reserve aligns the IP header. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the full
		 * TCP/UDP pseudo checksum verified to 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post std buffers back in batches so the chip never
		 * runs dry when rx_std_max_post is small.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3751
/* One pass of NAPI poll work: service link-change events, reclaim
 * completed TX descriptors, then process RX up to the remaining
 * budget.  Returns the updated work_done count; returns early if
 * tg3_tx() flagged a TX recovery so the caller can reset the chip.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit before re-running PHY
			 * setup so a new event is not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3785
/* NAPI poll callback.  Loops calling tg3_poll_work() until either the
 * budget is exhausted or no work remains, at which point NAPI is
 * completed and interrupts restarted.  A pending TX recovery aborts
 * polling and schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3826
/* Fence off the interrupt handlers: set tp->irq_sync (checked by the
 * ISRs via tg3_irq_sync()) and wait for any handler already running to
 * finish.  Must not be called while irq_sync is already set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* publish irq_sync before waiting on the handler */

	synchronize_irq(tp->pdev->irq);
}
3836
/* Nonzero while tg3_irq_quiesce() has interrupts fenced off; the ISRs
 * use this to avoid scheduling NAPI during a quiesce.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3841
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3853
/* Counterpart of tg3_full_lock().  Note it does not clear irq_sync;
 * the paths that requested quiescing reset tp->irq_sync themselves
 * (see e.g. tg3_restart_hw()).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3858
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll loop will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3875
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll loop will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Don't schedule NAPI if the driver is quiescing interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3900
/* Legacy INTx interrupt handler for chips using the non-tagged status
 * block.  May share its IRQ line, so it must decide whether the
 * interrupt is ours before claiming it.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Driver is quiescing interrupts: swallow this one. */
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3949
/* Legacy INTx interrupt handler for chips using tagged status blocks:
 * "is this interrupt ours" is decided by comparing the status tag
 * against the last tag the driver acknowledged.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Driver is quiescing interrupts: swallow this one. */
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3997
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt (and disable further ones) only if the
	 * status block was updated or PCI state says INTA is asserted
	 * by this device.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4012
4013 static int tg3_init_hw(struct tg3 *, int);
4014 static int tg3_halt(struct tg3 *, int, int);
4015
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success; on failure the chip is halted and the device
 * closed, and the tg3_init_hw() error code is returned.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() is invoked outside tp->lock, hence the
		 * unlock/lock bracket; irq_sync is cleared so a later
		 * tg3_irq_quiesce() does not BUG.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4037
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the INTx interrupt handler directly so that
 * netconsole and friends can make progress with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4046
/* Workqueue handler that halts and re-initializes the chip.  Scheduled
 * from tg3_tx_timeout() and from the TX-recovery path in tg3_poll().
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device went down since the work was scheduled: nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock to stop the interface, then retake it with the
	 * IRQ handler quiesced for the reset itself.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Consume (and clear) any pending request to rearm the timer. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* After a TX hang, switch to the workaround mailbox write
		 * methods (see TG3_FLAG_MBOX_WRITE_REORDER).
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
4087
/* Log a minimal register snapshot (MAC and read/write DMA engine
 * status) to aid debugging of TX timeouts.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4095
/* net_device tx_timeout hook: the stack detected a stalled TX queue.
 * Optionally log diagnostic state, then schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Actual recovery runs in tg3_reset_task() (process context). */
	schedule_work(&tp->reset_task);
}
4108
4109 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4110 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4111 {
4112         u32 base = (u32) mapping & 0xffffffff;
4113
4114         return ((base > 0xffffdcc0) &&
4115                 (base + len + 8 < base));
4116 }
4117
4118 /* Test for DMA addresses > 40-bit */
4119 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4120                                           int len)
4121 {
4122 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4123         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4124                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4125         return 0;
4126 #else
4127         return 0;
4128 #endif
4129 }
4130
4131 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4132
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	/* Copy the skb into a single linear buffer, map that instead, and
	 * unmap/unlink the original ring entries in [*start, last_plus_one).
	 * Returns 0 on success, -1 if the copy fails or the new mapping
	 * still trips the 4G check (packet dropped in that case).
	 */
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* A single descriptor now covers the whole copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the linear head; later entries held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* The first entry now owns the replacement skb. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* Original skb is fully unmapped now; free it. */
	dev_kfree_skb(skb);

	return ret;
}
4190
4191 static void tg3_set_txd(struct tg3 *tp, int entry,
4192                         dma_addr_t mapping, int len, u32 flags,
4193                         u32 mss_and_is_end)
4194 {
4195         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4196         int is_end = (mss_and_is_end & 0x1);
4197         u32 mss = (mss_and_is_end >> 1);
4198         u32 vlan_tag = 0;
4199
4200         if (is_end)
4201                 flags |= TXD_FLAG_END;
4202         if (flags & TXD_FLAG_VLAN) {
4203                 vlan_tag = flags >> 16;
4204                 flags &= 0xffff;
4205         }
4206         vlan_tag |= (mss << TXD_MSS_SHIFT);
4207
4208         txd->addr_hi = ((u64) mapping >> 32);
4209         txd->addr_lo = ((u64) mapping & 0xffffffff);
4210         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4211         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4212 }
4213
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: we must own the headers before rewriting them. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			/* IPv6 TSO: encode the header length in the upper
			 * bits of the mss field.
			 */
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Pre-set per-segment tot_len and clear the IP
			 * checksum for the hardware TSO engine.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* TCP checksum is cleared for hardware TSO as well. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first ring entry keeps the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, but re-check in case
		 * the completion path freed space in the meantime.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4332
4333 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4334
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		/* Room freed up between the two checks; resume the queue. */
		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off, then transmit each
	 * resulting segment through the normal xmit path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original skb is consumed on both success and error. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4367
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO path: we must own the headers before rewriting them. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO chip bug; fall back
		 * to software GSO segmentation in that case.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO: leave the TCP checksum cleared. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO: seed the pseudo-header checksum. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths where this chip variant
		 * expects them (the bit position differs by ASIC).
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	/* Track whether any mapping trips the 4G/40-bit errata; if so the
	 * whole packet is re-queued via the bounce workaround below.
	 */
	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first ring entry keeps the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first ring entry used by this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, but re-check in case
		 * the completion path freed space in the meantime.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4541
4542 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4543                                int new_mtu)
4544 {
4545         dev->mtu = new_mtu;
4546
4547         if (new_mtu > ETH_DATA_LEN) {
4548                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4549                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4550                         ethtool_op_set_tso(dev, 0);
4551                 }
4552                 else
4553                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4554         } else {
4555                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4556                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4557                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4558         }
4559 }
4560
/* net_device change_mtu hook.  Validates the new MTU; if the device is
 * running, halts the chip, applies the new size and restarts it.
 * Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	/* Quiesce the IRQ handler for the halt/restart sequence. */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4594
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard-size RX ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: an skb occupies one entry for its linear head plus
	 * one per page fragment, so walk i across all of them and free
	 * the skb once at the end.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Fragment entries may wrap past the ring end. */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4666
4667 /* Initialize tx/rx rings for packet processing.
4668  *
4669  * The chip has been shut down and the driver detached from
4670  * the networking, so no interrupts or new tx packets will
4671  * end up in the driver.  tp->{tx,}lock are held and thus
4672  * we may not sleep.
4673  */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips serve large MTUs from bigger standard-ring
	 * buffers instead of a separate jumbo ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* NOTE(review): 64 bytes are subtracted beyond the
		 * rx_offset adjustment — presumably tailroom reserved by
		 * the skb allocator; confirm against tg3_alloc_rx_skb().
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial
	 * allocation is tolerated by shrinking the pending count;
	 * only a totally empty ring is a hard failure.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				/* Unlike the std-ring case above, skbs were
				 * already posted here, so free them first.
				 */
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4756
4757 /*
4758  * Must not be invoked with interrupt sources disabled and
4759  * the hardware shutdown down.
4760  */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_std_buffers is the base of the single kzalloc() that also
	 * backs rx_jumbo_buffers and tx_buffers (see tg3_alloc_consistent),
	 * so one kfree() releases all three bookkeeping arrays.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	/* Each DMA region is freed only if allocated, and the pointer is
	 * cleared so a repeated call (e.g. from the alloc error path)
	 * is safe.
	 */
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4796
4797 /*
4798  * Must not be invoked with interrupt sources disabled and
4799  * the hardware shutdown down.  Can sleep.
4800  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation holds the host-side bookkeeping for the
	 * std rx ring, the jumbo rx ring and the tx ring, in that order;
	 * the two pointers below carve it up.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	/* DMA-coherent descriptor rings and status/statistics blocks.
	 * On any failure we fall through to tg3_free_consistent(), which
	 * NULL-checks each pointer and frees whatever was allocated.
	 */
	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent() does not zero the memory. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4858
4859 #define MAX_WAIT_CNT 1000
4860
4861 /* To stop a block, clear the enable bit and poll till it
4862  * clears.  tp->lock is held.
4863  */
4864 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4865 {
4866         unsigned int i;
4867         u32 val;
4868
4869         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4870                 switch (ofs) {
4871                 case RCVLSC_MODE:
4872                 case DMAC_MODE:
4873                 case MBFREE_MODE:
4874                 case BUFMGR_MODE:
4875                 case MEMARB_MODE:
4876                         /* We can't enable/disable these bits of the
4877                          * 5705/5750, just say success.
4878                          */
4879                         return 0;
4880
4881                 default:
4882                         break;
4883                 };
4884         }
4885
4886         val = tr32(ofs);
4887         val &= ~enable_bit;
4888         tw32_f(ofs, val);
4889
4890         for (i = 0; i < MAX_WAIT_CNT; i++) {
4891                 udelay(100);
4892                 val = tr32(ofs);
4893                 if ((val & enable_bit) == 0)
4894                         break;
4895         }
4896
4897         if (i == MAX_WAIT_CNT && !silent) {
4898                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4899                        "ofs=%lx enable_bit=%x\n",
4900                        ofs, enable_bit);
4901                 return -ENODEV;
4902         }
4903
4904         return 0;
4905 }
4906
4907 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop rx traffic at the MAC first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-side blocks.  Errors are OR-ed together
	 * so every block is attempted even if an earlier one times out.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Then the send-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll up to 100ms for it to
	 * report idle.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	/* Buffer manager and memory arbiter go last. */
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host status and statistics blocks if allocated. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4969
4970 /* tp->lock is held. */
4971 static int tg3_nvram_lock(struct tg3 *tp)
4972 {
4973         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4974                 int i;
4975
4976                 if (tp->nvram_lock_cnt == 0) {
4977                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4978                         for (i = 0; i < 8000; i++) {
4979                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4980                                         break;
4981                                 udelay(20);
4982                         }
4983                         if (i == 8000) {
4984                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4985                                 return -ENODEV;
4986                         }
4987                 }
4988                 tp->nvram_lock_cnt++;
4989         }
4990         return 0;
4991 }
4992
4993 /* tp->lock is held. */
4994 static void tg3_nvram_unlock(struct tg3 *tp)
4995 {
4996         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4997                 if (tp->nvram_lock_cnt > 0)
4998                         tp->nvram_lock_cnt--;
4999                 if (tp->nvram_lock_cnt == 0)
5000                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5001         }
5002 }
5003
5004 /* tp->lock is held. */
5005 static void tg3_enable_nvram_access(struct tg3 *tp)
5006 {
5007         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5008             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5009                 u32 nvaccess = tr32(NVRAM_ACCESS);
5010
5011                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5012         }
5013 }
5014
5015 /* tp->lock is held. */
5016 static void tg3_disable_nvram_access(struct tg3 *tp)
5017 {
5018         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5019             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5020                 u32 nvaccess = tr32(NVRAM_ACCESS);
5021
5022                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5023         }
5024 }
5025
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Only talk to the APE when its firmware reports itself alive
	 * and ready.
	 */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Post the new event only once the previous one has been
		 * consumed; the status word is written under the APE
		 * memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if the event was actually posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5061
5062 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5063 {
5064         u32 event;
5065         u32 apedata;
5066
5067         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5068                 return;
5069
5070         switch (kind) {
5071                 case RESET_KIND_INIT:
5072                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5073                                         APE_HOST_SEG_SIG_MAGIC);
5074                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5075                                         APE_HOST_SEG_LEN_MAGIC);
5076                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5077                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5078                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5079                                         APE_HOST_DRIVER_ID_MAGIC);
5080                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5081                                         APE_HOST_BEHAV_NO_PHYLOCK);
5082
5083                         event = APE_EVENT_STATUS_STATE_START;
5084                         break;
5085                 case RESET_KIND_SHUTDOWN:
5086                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5087                         break;
5088                 case RESET_KIND_SUSPEND:
5089                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5090                         break;
5091                 default:
5092                         return;
5093         }
5094
5095         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5096
5097         tg3_ape_send_event(tp, event);
5098 }
5099
5100 /* tp->lock is held. */
5101 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5102 {
5103         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5104                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5105
5106         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5107                 switch (kind) {
5108                 case RESET_KIND_INIT:
5109                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5110                                       DRV_STATE_START);
5111                         break;
5112
5113                 case RESET_KIND_SHUTDOWN:
5114                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5115                                       DRV_STATE_UNLOAD);
5116                         break;
5117
5118                 case RESET_KIND_SUSPEND:
5119                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5120                                       DRV_STATE_SUSPEND);
5121                         break;
5122
5123                 default:
5124                         break;
5125                 };
5126         }
5127
5128         if (kind == RESET_KIND_INIT ||
5129             kind == RESET_KIND_SUSPEND)
5130                 tg3_ape_driver_state_change(tp, kind);
5131 }
5132
5133 /* tp->lock is held. */
5134 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5135 {
5136         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5137                 switch (kind) {
5138                 case RESET_KIND_INIT:
5139                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5140                                       DRV_STATE_START_DONE);
5141                         break;
5142
5143                 case RESET_KIND_SHUTDOWN:
5144                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5145                                       DRV_STATE_UNLOAD_DONE);
5146                         break;
5147
5148                 default:
5149                         break;
5150                 };
5151         }
5152
5153         if (kind == RESET_KIND_SHUTDOWN)
5154                 tg3_ape_driver_state_change(tp, kind);
5155 }
5156
5157 /* tp->lock is held. */
5158 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5159 {
5160         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5161                 switch (kind) {
5162                 case RESET_KIND_INIT:
5163                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5164                                       DRV_STATE_START);
5165                         break;
5166
5167                 case RESET_KIND_SHUTDOWN:
5168                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5169                                       DRV_STATE_UNLOAD);
5170                         break;
5171
5172                 case RESET_KIND_SUSPEND:
5173                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5174                                       DRV_STATE_SUSPEND);
5175                         break;
5176
5177                 default:
5178                         break;
5179                 };
5180         }
5181 }
5182
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* The 5906's VCPU reports readiness in a status register rather
	 * than via the SRAM mailbox handshake below.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: the firmware
	 * acknowledges by writing the one's complement of the magic
	 * value back into the mailbox (up to ~1s total).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5221
5222 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC_MISC_CFG core clock reset can clear the memory enable
	 * bit in PCI_COMMAND (see tg3_chip_reset), so keep a copy for
	 * tg3_restore_pci_state() to write back.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5227
5228 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get a 4096-byte max read request; legacy parts need
	 * the cache line size and latency timer rewritten after reset.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Re-enable MSI in the chip's own interrupt block too. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5289
5290 static void tg3_stop_fw(struct tg3 *);
5291
5292 /* tp->lock is held. */
5293 static int tg3_chip_reset(struct tg3 *tp)
5294 {
5295         u32 val;
5296         void (*write_op)(struct tg3 *, u32, u32);
5297         int err;
5298
5299         tg3_nvram_lock(tp);
5300
5301         /* No matching tg3_nvram_unlock() after this because
5302          * chip reset below will undo the nvram lock.
5303          */
5304         tp->nvram_lock_cnt = 0;
5305
5306         /* GRC_MISC_CFG core clock reset will clear the memory
5307          * enable bit in PCI register 4 and the MSI enable bit
5308          * on some chips, so we save relevant registers here.
5309          */
5310         tg3_save_pci_state(tp);
5311
5312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5313             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5315             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5316             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5317                 tw32(GRC_FASTBOOT_PC, 0);
5318
5319         /*
5320          * We must avoid the readl() that normally takes place.
5321          * It locks machines, causes machine checks, and other
5322          * fun things.  So, temporarily disable the 5701
5323          * hardware workaround, while we do the reset.
5324          */
5325         write_op = tp->write32;
5326         if (write_op == tg3_write_flush_reg32)
5327                 tp->write32 = tg3_write32;
5328
5329         /* Prevent the irq handler from reading or writing PCI registers
5330          * during chip reset when the memory enable bit in the PCI command
5331          * register may be cleared.  The chip does not generate interrupt
5332          * at this time, but the irq handler may still be called due to irq
5333          * sharing or irqpoll.
5334          */
5335         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5336         if (tp->hw_status) {
5337                 tp->hw_status->status = 0;
5338                 tp->hw_status->status_tag = 0;
5339         }
5340         tp->last_tag = 0;
5341         smp_mb();
5342         synchronize_irq(tp->pdev->irq);
5343
5344         /* do the reset */
5345         val = GRC_MISC_CFG_CORECLK_RESET;
5346
5347         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5348                 if (tr32(0x7e2c) == 0x60) {
5349                         tw32(0x7e2c, 0x20);
5350                 }
5351                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5352                         tw32(GRC_MISC_CFG, (1 << 29));
5353                         val |= (1 << 29);
5354                 }
5355         }
5356
5357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5358                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5359                 tw32(GRC_VCPU_EXT_CTRL,
5360                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5361         }
5362
5363         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5364                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5365         tw32(GRC_MISC_CFG, val);
5366
5367         /* restore 5701 hardware bug workaround write method */
5368         tp->write32 = write_op;
5369
5370         /* Unfortunately, we have to delay before the PCI read back.
5371          * Some 575X chips even will not respond to a PCI cfg access
5372          * when the reset command is given to the chip.
5373          *
5374          * How do these hardware designers expect things to work
5375          * properly if the PCI write is posted for a long period
5376          * of time?  It is always necessary to have some method by
5377          * which a register read back can occur to push the write
5378          * out which does the reset.
5379          *
5380          * For most tg3 variants the trick below was working.
5381          * Ho hum...
5382          */
5383         udelay(120);
5384
5385         /* Flush PCI posted writes.  The normal MMIO registers
5386          * are inaccessible at this time so this is the only
5387          * way to make this reliably (actually, this is no longer
5388          * the case, see above).  I tried to use indirect
5389          * register read/write but this upset some 5701 variants.
5390          */
5391         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5392
5393         udelay(120);
5394
5395         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5396                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5397                         int i;
5398                         u32 cfg_val;
5399
5400                         /* Wait for link training to complete.  */
5401                         for (i = 0; i < 5000; i++)
5402                                 udelay(100);
5403
5404                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5405                         pci_write_config_dword(tp->pdev, 0xc4,
5406                                                cfg_val | (1 << 15));
5407                 }
5408                 /* Set PCIE max payload size and clear error status.  */
5409                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5410         }
5411
5412         tg3_restore_pci_state(tp);
5413
5414         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5415
5416         val = 0;
5417         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5418                 val = tr32(MEMARB_MODE);
5419         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5420
5421         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5422                 tg3_stop_fw(tp);
5423                 tw32(0x5000, 0x400);
5424         }
5425
5426         tw32(GRC_MODE, tp->grc_mode);
5427
5428         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5429                 val = tr32(0xc4);
5430
5431                 tw32(0xc4, val | (1 << 15));
5432         }
5433
5434         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5435             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5436                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5437                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5438                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5439                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5440         }
5441
5442         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5443                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5444                 tw32_f(MAC_MODE, tp->mac_mode);
5445         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5446                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5447                 tw32_f(MAC_MODE, tp->mac_mode);
5448         } else
5449                 tw32_f(MAC_MODE, 0);
5450         udelay(40);
5451
5452         err = tg3_poll_fw(tp);
5453         if (err)
5454                 return err;
5455
5456         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5457             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5458                 val = tr32(0x7c00);
5459
5460                 tw32(0x7c00, val | (1 << 25));
5461         }
5462
5463         /* Reprobe ASF enable state.  */
5464         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5465         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5466         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5467         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5468                 u32 nic_cfg;
5469
5470                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5471                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5472                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5473                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5474                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5475                 }
5476         }
5477
5478         return 0;
5479 }
5480
5481 /* tp->lock is held. */
5482 static void tg3_stop_fw(struct tg3 *tp)
5483 {
5484         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5485            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5486                 u32 val;
5487                 int i;
5488
5489                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5490                 val = tr32(GRC_RX_CPU_EVENT);
5491                 val |= (1 << 14);
5492                 tw32(GRC_RX_CPU_EVENT, val);
5493
5494                 /* Wait for RX cpu to ACK the event.  */
5495                 for (i = 0; i < 100; i++) {
5496                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5497                                 break;
5498                         udelay(1);
5499                 }
5500         }
5501 }
5502
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	/* Quiesce the firmware and announce the coming reset. */
	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* The post-reset signatures are written even when the chip
	 * reset itself failed.
	 */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5523
/* Link map for the built-in 5701 A0 rx-cpu firmware image that follows
 * (tg3FwText / tg3FwRodata).  Addresses are in the cpu's own view of
 * memory; only the low 16 bits are used to place sections inside the
 * scratch window (see tg3_load_firmware_cpu).
 * NOTE(review): "RELASE" below is a historical misspelling of
 * "RELEASE"; left as-is so any other references keep compiling.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5538
/* .text section of the built-in 5701 A0 rx-cpu firmware, stored as raw
 * 32-bit instruction words.  Loaded by tg3_load_5701_a0_firmware_fix();
 * do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5632
/* .rodata section of the 5701 A0 rx-cpu firmware: ASCII string data
 * packed into 32-bit words (e.g. 0x53774576,0x656e7430 = "SwEvent0").
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5640
/* The firmware .data section is all zeros; the loader passes a NULL
 * data pointer instead and zero-fills (see tg3_load_firmware_cpu).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif

/* Per-cpu scratch memory windows used when loading firmware. */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5652
5653 /* tp->lock is held. */
5654 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5655 {
5656         int i;
5657
5658         BUG_ON(offset == TX_CPU_BASE &&
5659             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5660
5661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5662                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5663
5664                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5665                 return 0;
5666         }
5667         if (offset == RX_CPU_BASE) {
5668                 for (i = 0; i < 10000; i++) {
5669                         tw32(offset + CPU_STATE, 0xffffffff);
5670                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5671                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5672                                 break;
5673                 }
5674
5675                 tw32(offset + CPU_STATE, 0xffffffff);
5676                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5677                 udelay(10);
5678         } else {
5679                 for (i = 0; i < 10000; i++) {
5680                         tw32(offset + CPU_STATE, 0xffffffff);
5681                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5682                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5683                                 break;
5684                 }
5685         }
5686
5687         if (i >= 10000) {
5688                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5689                        "and %s CPU\n",
5690                        tp->dev->name,
5691                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5692                 return -ENODEV;
5693         }
5694
5695         /* Clear firmware's nvram arbitration. */
5696         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5697                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5698         return 0;
5699 }
5700
/* Describes one firmware image as three sections (text, rodata, data),
 * as consumed by tg3_load_firmware_cpu().  A NULL *_data pointer means
 * the section is zero-filled when loaded.
 */
struct fw_info {
	unsigned int text_base;		/* cpu-view address of .text */
	unsigned int text_len;		/* .text length, in bytes */
	const u32 *text_data;
	unsigned int rodata_base;	/* cpu-view address of .rodata */
	unsigned int rodata_len;	/* .rodata length, in bytes */
	const u32 *rodata_data;
	unsigned int data_base;		/* cpu-view address of .data */
	unsigned int data_len;		/* .data length, in bytes */
	const u32 *data_data;
};
5712
5713 /* tp->lock is held. */
5714 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5715                                  int cpu_scratch_size, struct fw_info *info)
5716 {
5717         int err, lock_err, i;
5718         void (*write_op)(struct tg3 *, u32, u32);
5719
5720         if (cpu_base == TX_CPU_BASE &&
5721             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5722                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5723                        "TX cpu firmware on %s which is 5705.\n",
5724                        tp->dev->name);
5725                 return -EINVAL;
5726         }
5727
5728         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5729                 write_op = tg3_write_mem;
5730         else
5731                 write_op = tg3_write_indirect_reg32;
5732
5733         /* It is possible that bootcode is still loading at this point.
5734          * Get the nvram lock first before halting the cpu.
5735          */
5736         lock_err = tg3_nvram_lock(tp);
5737         err = tg3_halt_cpu(tp, cpu_base);
5738         if (!lock_err)
5739                 tg3_nvram_unlock(tp);
5740         if (err)
5741                 goto out;
5742
5743         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5744                 write_op(tp, cpu_scratch_base + i, 0);
5745         tw32(cpu_base + CPU_STATE, 0xffffffff);
5746         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5747         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5748                 write_op(tp, (cpu_scratch_base +
5749                               (info->text_base & 0xffff) +
5750                               (i * sizeof(u32))),
5751                          (info->text_data ?
5752                           info->text_data[i] : 0));
5753         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5754                 write_op(tp, (cpu_scratch_base +
5755                               (info->rodata_base & 0xffff) +
5756                               (i * sizeof(u32))),
5757                          (info->rodata_data ?
5758                           info->rodata_data[i] : 0));
5759         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5760                 write_op(tp, (cpu_scratch_base +
5761                               (info->data_base & 0xffff) +
5762                               (i * sizeof(u32))),
5763                          (info->data_data ?
5764                           info->data_data[i] : 0));
5765
5766         err = 0;
5767
5768 out:
5769         return err;
5770 }
5771
5772 /* tp->lock is held. */
5773 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5774 {
5775         struct fw_info info;
5776         int err, i;
5777
5778         info.text_base = TG3_FW_TEXT_ADDR;
5779         info.text_len = TG3_FW_TEXT_LEN;
5780         info.text_data = &tg3FwText[0];
5781         info.rodata_base = TG3_FW_RODATA_ADDR;
5782         info.rodata_len = TG3_FW_RODATA_LEN;
5783         info.rodata_data = &tg3FwRodata[0];
5784         info.data_base = TG3_FW_DATA_ADDR;
5785         info.data_len = TG3_FW_DATA_LEN;
5786         info.data_data = NULL;
5787
5788         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5789                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5790                                     &info);
5791         if (err)
5792                 return err;
5793
5794         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5795                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5796                                     &info);
5797         if (err)
5798                 return err;
5799
5800         /* Now startup only the RX cpu. */
5801         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5802         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5803
5804         for (i = 0; i < 5; i++) {
5805                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5806                         break;
5807                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5808                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5809                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5810                 udelay(1000);
5811         }
5812         if (i >= 5) {
5813                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5814                        "to set RX CPU PC, is %08x should be %08x\n",
5815                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5816                        TG3_FW_TEXT_ADDR);
5817                 return -ENODEV;
5818         }
5819         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5820         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5821
5822         return 0;
5823 }
5824
5825
/* Link map for the built-in TSO firmware image that follows
 * (tg3TsoFwText et al.).  Addresses are in the cpu's own view of
 * memory.  NOTE(review): "RELASE" below is a historical misspelling
 * of "RELEASE"; left as-is so any other references keep compiling.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5840
5841 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5842         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5843         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5844         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5845         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5846         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5847         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5848         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5849         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5850         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5851         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5852         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5853         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5854         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5855         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5856         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5857         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5858         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5859         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5860         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5861         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5862         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5863         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5864         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5865         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5866         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5867         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5868         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5869         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5870         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5871         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5872         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5873         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5874         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5875         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5876         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5877         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5878         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5879         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5880         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5881         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5882         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5883         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5884         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5885         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5886         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5887         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5888         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5889         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5890         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5891         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5892         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5893         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5894         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5895         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5896         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5897         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5898         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5899         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5900         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5901         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5902         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5903         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5904         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5905         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5906         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5907         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5908         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5909         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5910         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5911         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5912         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5913         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5914         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5915         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5916         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5917         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5918         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5919         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5920         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5921         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5922         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5923         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5924         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5925         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5926         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5927         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5928         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5929         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5930         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5931         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5932         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5933         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5934         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5935         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5936         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5937         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5938         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5939         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5940         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5941         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5942         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5943         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5944         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5945         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5946         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5947         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5948         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5949         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5950         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5951         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5952         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5953         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5954         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5955         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5956         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5957         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5958         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5959         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5960         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5961         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5962         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5963         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5964         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5965         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5966         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5967         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5968         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5969         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5970         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5971         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5972         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5973         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5974         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5975         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5976         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5977         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5978         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5979         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5980         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5981         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5982         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5983         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5984         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5985         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5986         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5987         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5988         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5989         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5990         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5991         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5992         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5993         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5994         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5995         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5996         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5997         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5998         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5999         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6000         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6001         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6002         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6003         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6004         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6005         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6006         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6007         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6008         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6009         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6010         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6011         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6012         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6013         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6014         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6015         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6016         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6017         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6018         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6019         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6020         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6021         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6022         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6023         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6024         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6025         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6026         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6027         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6028         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6029         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6030         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6031         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6032         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6033         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6034         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6035         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6036         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6037         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6038         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6039         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6040         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6041         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6042         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6043         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6044         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6045         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6046         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6047         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6048         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6049         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6050         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6051         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6052         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6053         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6054         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6055         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6056         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6057         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6058         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6059         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6060         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6061         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6062         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6063         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6064         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6065         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6066         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6067         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6068         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6069         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6070         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6071         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6072         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6073         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6074         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6075         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6076         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6077         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6078         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6079         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6080         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6081         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6082         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6083         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6084         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6085         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6086         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6087         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6088         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6089         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6090         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6091         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6092         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6093         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6094         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6095         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6096         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6097         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6098         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6099         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6100         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6101         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6102         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6103         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6104         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6105         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6106         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6107         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6108         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6109         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6110         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6111         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6112         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6113         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6114         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6115         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6116         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6117         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6118         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6119         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6120         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6121         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6122         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6123         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6124         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6125         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6126 };
6127
/*
 * .rodata segment of the standard TSO firmware image (used for non-5705
 * chips; see tg3_load_tso_firmware()).  The words are ASCII tags left in
 * by the firmware build -- e.g. "MainCpuB", "MainCpuA", "stkoffld",
 * "SwEvent0", "fatalErr".  Opaque firmware data; do not edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6135
/*
 * .data segment of the standard TSO firmware image.  Contains the ASCII
 * version tag "stkoffld_v1.6.0" followed by zero-initialized storage.
 * Opaque firmware data; do not edit by hand.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
6141
6142 /* 5705 needs a special version of the TSO firmware.  */
6143 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6144 #define TG3_TSO5_FW_RELASE_MINOR        0x2
6145 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6146 #define TG3_TSO5_FW_START_ADDR          0x00010000
6147 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6148 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6149 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6150 #define TG3_TSO5_FW_RODATA_LEN          0x50
6151 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6152 #define TG3_TSO5_FW_DATA_LEN            0x20
6153 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6154 #define TG3_TSO5_FW_SBSS_LEN            0x28
6155 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6156 #define TG3_TSO5_FW_BSS_LEN             0x88
6157
/*
 * .text segment of the 5705-specific TSO firmware image: big-endian MIPS
 * machine code, downloaded into the RX CPU by tg3_load_tso_firmware()
 * (scratch space is carved out of the 5705 MBUF pool there).  Derived from
 * Broadcom's unpublished sources -- see the copyright notice at the top of
 * this file.  Opaque firmware image; do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6316
/*
 * .rodata segment of the 5705 TSO firmware image.  The words are ASCII
 * tags left in by the firmware build -- e.g. "MainCpuB", "MainCpuA",
 * "stkoffld", "fatalErr".  Opaque firmware data; do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6323
/*
 * .data segment of the 5705 TSO firmware image.  Contains the ASCII
 * version tag "stkoffld_v1.2.0" followed by zero-initialized storage.
 * Opaque firmware data; do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6328
6329 /* tp->lock is held. */
6330 static int tg3_load_tso_firmware(struct tg3 *tp)
6331 {
6332         struct fw_info info;
6333         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6334         int err, i;
6335
6336         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6337                 return 0;
6338
6339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6340                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6341                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6342                 info.text_data = &tg3Tso5FwText[0];
6343                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6344                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6345                 info.rodata_data = &tg3Tso5FwRodata[0];
6346                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6347                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6348                 info.data_data = &tg3Tso5FwData[0];
6349                 cpu_base = RX_CPU_BASE;
6350                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6351                 cpu_scratch_size = (info.text_len +
6352                                     info.rodata_len +
6353                                     info.data_len +
6354                                     TG3_TSO5_FW_SBSS_LEN +
6355                                     TG3_TSO5_FW_BSS_LEN);
6356         } else {
6357                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6358                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6359                 info.text_data = &tg3TsoFwText[0];
6360                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6361                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6362                 info.rodata_data = &tg3TsoFwRodata[0];
6363                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6364                 info.data_len = TG3_TSO_FW_DATA_LEN;
6365                 info.data_data = &tg3TsoFwData[0];
6366                 cpu_base = TX_CPU_BASE;
6367                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6368                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6369         }
6370
6371         err = tg3_load_firmware_cpu(tp, cpu_base,
6372                                     cpu_scratch_base, cpu_scratch_size,
6373                                     &info);
6374         if (err)
6375                 return err;
6376
6377         /* Now startup the cpu. */
6378         tw32(cpu_base + CPU_STATE, 0xffffffff);
6379         tw32_f(cpu_base + CPU_PC,    info.text_base);
6380
6381         for (i = 0; i < 5; i++) {
6382                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6383                         break;
6384                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6385                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6386                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6387                 udelay(1000);
6388         }
6389         if (i >= 5) {
6390                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6391                        "to set CPU PC, is %08x should be %08x\n",
6392                        tp->dev->name, tr32(cpu_base + CPU_PC),
6393                        info.text_base);
6394                 return -ENODEV;
6395         }
6396         tw32(cpu_base + CPU_STATE, 0xffffffff);
6397         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6398         return 0;
6399 }
6400
6401
6402 /* tp->lock is held. */
6403 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6404 {
6405         u32 addr_high, addr_low;
6406         int i;
6407
6408         addr_high = ((tp->dev->dev_addr[0] << 8) |
6409                      tp->dev->dev_addr[1]);
6410         addr_low = ((tp->dev->dev_addr[2] << 24) |
6411                     (tp->dev->dev_addr[3] << 16) |
6412                     (tp->dev->dev_addr[4] <<  8) |
6413                     (tp->dev->dev_addr[5] <<  0));
6414         for (i = 0; i < 4; i++) {
6415                 if (i == 1 && skip_mac_1)
6416                         continue;
6417                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6418                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6419         }
6420
6421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6422             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6423                 for (i = 0; i < 12; i++) {
6424                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6425                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6426                 }
6427         }
6428
6429         addr_high = (tp->dev->dev_addr[0] +
6430                      tp->dev->dev_addr[1] +
6431                      tp->dev->dev_addr[2] +
6432                      tp->dev->dev_addr[3] +
6433                      tp->dev->dev_addr[4] +
6434                      tp->dev->dev_addr[5]) &
6435                 TX_BACKOFF_SEED_MASK;
6436         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6437 }
6438
6439 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6440 {
6441         struct tg3 *tp = netdev_priv(dev);
6442         struct sockaddr *addr = p;
6443         int err = 0, skip_mac_1 = 0;
6444
6445         if (!is_valid_ether_addr(addr->sa_data))
6446                 return -EINVAL;
6447
6448         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6449
6450         if (!netif_running(dev))
6451                 return 0;
6452
6453         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6454                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6455
6456                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6457                 addr0_low = tr32(MAC_ADDR_0_LOW);
6458                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6459                 addr1_low = tr32(MAC_ADDR_1_LOW);
6460
6461                 /* Skip MAC addr 1 if ASF is using it. */
6462                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6463                     !(addr1_high == 0 && addr1_low == 0))
6464                         skip_mac_1 = 1;
6465         }
6466         spin_lock_bh(&tp->lock);
6467         __tg3_set_mac_addr(tp, skip_mac_1);
6468         spin_unlock_bh(&tp->lock);
6469
6470         return err;
6471 }
6472
6473 /* tp->lock is held. */
6474 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6475                            dma_addr_t mapping, u32 maxlen_flags,
6476                            u32 nic_addr)
6477 {
6478         tg3_write_mem(tp,
6479                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6480                       ((u64) mapping >> 32));
6481         tg3_write_mem(tp,
6482                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6483                       ((u64) mapping & 0xffffffff));
6484         tg3_write_mem(tp,
6485                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6486                        maxlen_flags);
6487
6488         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6489                 tg3_write_mem(tp,
6490                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6491                               nic_addr);
6492 }
6493
6494 static void __tg3_set_rx_mode(struct net_device *);
6495 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6496 {
6497         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6498         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6499         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6500         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6501         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6502                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6503                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6504         }
6505         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6506         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6507         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6508                 u32 val = ec->stats_block_coalesce_usecs;
6509
6510                 if (!netif_carrier_ok(tp->dev))
6511                         val = 0;
6512
6513                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6514         }
6515 }
6516
6517 /* tp->lock is held. */
6518 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6519 {
6520         u32 val, rdmac_mode;
6521         int i, err, limit;
6522
6523         tg3_disable_ints(tp);
6524
6525         tg3_stop_fw(tp);
6526
6527         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6528
6529         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6530                 tg3_abort_hw(tp, 1);
6531         }
6532
6533         if (reset_phy)
6534                 tg3_phy_reset(tp);
6535
6536         err = tg3_chip_reset(tp);
6537         if (err)
6538                 return err;
6539
6540         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6541
6542         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6543             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6544                 val = tr32(TG3_CPMU_CTRL);
6545                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6546                 tw32(TG3_CPMU_CTRL, val);
6547
6548                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6549                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6550                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6551                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6552
6553                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6554                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6555                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6556                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6557
6558                 val = tr32(TG3_CPMU_HST_ACC);
6559                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6560                 val |= CPMU_HST_ACC_MACCLK_6_25;
6561                 tw32(TG3_CPMU_HST_ACC, val);
6562         }
6563
6564         /* This works around an issue with Athlon chipsets on
6565          * B3 tigon3 silicon.  This bit has no effect on any
6566          * other revision.  But do not set this on PCI Express
6567          * chips and don't even touch the clocks if the CPMU is present.
6568          */
6569         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6570                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6571                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6572                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6573         }
6574
6575         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6576             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6577                 val = tr32(TG3PCI_PCISTATE);
6578                 val |= PCISTATE_RETRY_SAME_DMA;
6579                 tw32(TG3PCI_PCISTATE, val);
6580         }
6581
6582         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6583                 /* Allow reads and writes to the
6584                  * APE register and memory space.
6585                  */
6586                 val = tr32(TG3PCI_PCISTATE);
6587                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6588                        PCISTATE_ALLOW_APE_SHMEM_WR;
6589                 tw32(TG3PCI_PCISTATE, val);
6590         }
6591
6592         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6593                 /* Enable some hw fixes.  */
6594                 val = tr32(TG3PCI_MSI_DATA);
6595                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6596                 tw32(TG3PCI_MSI_DATA, val);
6597         }
6598
6599         /* Descriptor ring init may make accesses to the
6600          * NIC SRAM area to setup the TX descriptors, so we
6601          * can only do this after the hardware has been
6602          * successfully reset.
6603          */
6604         err = tg3_init_rings(tp);
6605         if (err)
6606                 return err;
6607
6608         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6609             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6610                 /* This value is determined during the probe time DMA
6611                  * engine test, tg3_test_dma.
6612                  */
6613                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6614         }
6615
6616         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6617                           GRC_MODE_4X_NIC_SEND_RINGS |
6618                           GRC_MODE_NO_TX_PHDR_CSUM |
6619                           GRC_MODE_NO_RX_PHDR_CSUM);
6620         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6621
6622         /* Pseudo-header checksum is done by hardware logic and not
6623          * the offload processers, so make the chip do the pseudo-
6624          * header checksums on receive.  For transmit it is more
6625          * convenient to do the pseudo-header checksum in software
6626          * as Linux does that on transmit for us in all cases.
6627          */
6628         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6629
6630         tw32(GRC_MODE,
6631              tp->grc_mode |
6632              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6633
6634         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6635         val = tr32(GRC_MISC_CFG);
6636         val &= ~0xff;
6637         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6638         tw32(GRC_MISC_CFG, val);
6639
6640         /* Initialize MBUF/DESC pool. */
6641         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6642                 /* Do nothing.  */
6643         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6644                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6645                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6646                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6647                 else
6648                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6649                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6650                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6651         }
6652         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6653                 int fw_len;
6654
6655                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6656                           TG3_TSO5_FW_RODATA_LEN +
6657                           TG3_TSO5_FW_DATA_LEN +
6658                           TG3_TSO5_FW_SBSS_LEN +
6659                           TG3_TSO5_FW_BSS_LEN);
6660                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6661                 tw32(BUFMGR_MB_POOL_ADDR,
6662                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6663                 tw32(BUFMGR_MB_POOL_SIZE,
6664                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6665         }
6666
6667         if (tp->dev->mtu <= ETH_DATA_LEN) {
6668                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6669                      tp->bufmgr_config.mbuf_read_dma_low_water);
6670                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6671                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6672                 tw32(BUFMGR_MB_HIGH_WATER,
6673                      tp->bufmgr_config.mbuf_high_water);
6674         } else {
6675                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6676                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6677                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6678                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6679                 tw32(BUFMGR_MB_HIGH_WATER,
6680                      tp->bufmgr_config.mbuf_high_water_jumbo);
6681         }
6682         tw32(BUFMGR_DMA_LOW_WATER,
6683              tp->bufmgr_config.dma_low_water);
6684         tw32(BUFMGR_DMA_HIGH_WATER,
6685              tp->bufmgr_config.dma_high_water);
6686
6687         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6688         for (i = 0; i < 2000; i++) {
6689                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6690                         break;
6691                 udelay(10);
6692         }
6693         if (i >= 2000) {
6694                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6695                        tp->dev->name);
6696                 return -ENODEV;
6697         }
6698
6699         /* Setup replenish threshold. */
6700         val = tp->rx_pending / 8;
6701         if (val == 0)
6702                 val = 1;
6703         else if (val > tp->rx_std_max_post)
6704                 val = tp->rx_std_max_post;
6705         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6706                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6707                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6708
6709                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6710                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6711         }
6712
6713         tw32(RCVBDI_STD_THRESH, val);
6714
6715         /* Initialize TG3_BDINFO's at:
6716          *  RCVDBDI_STD_BD:     standard eth size rx ring
6717          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6718          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6719          *
6720          * like so:
6721          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6722          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6723          *                              ring attribute flags
6724          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6725          *
6726          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6727          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6728          *
6729          * The size of each ring is fixed in the firmware, but the location is
6730          * configurable.
6731          */
6732         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6733              ((u64) tp->rx_std_mapping >> 32));
6734         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6735              ((u64) tp->rx_std_mapping & 0xffffffff));
6736         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6737              NIC_SRAM_RX_BUFFER_DESC);
6738
6739         /* Don't even try to program the JUMBO/MINI buffer descriptor
6740          * configs on 5705.
6741          */
6742         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6743                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6744                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6745         } else {
6746                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6747                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6748
6749                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6750                      BDINFO_FLAGS_DISABLED);
6751
6752                 /* Setup replenish threshold. */
6753                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6754
6755                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6756                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6757                              ((u64) tp->rx_jumbo_mapping >> 32));
6758                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6759                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6760                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6761                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6762                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6763                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6764                 } else {
6765                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6766                              BDINFO_FLAGS_DISABLED);
6767                 }
6768
6769         }
6770
6771         /* There is only one send ring on 5705/5750, no need to explicitly
6772          * disable the others.
6773          */
6774         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6775                 /* Clear out send RCB ring in SRAM. */
6776                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6777                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6778                                       BDINFO_FLAGS_DISABLED);
6779         }
6780
6781         tp->tx_prod = 0;
6782         tp->tx_cons = 0;
6783         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6784         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6785
6786         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6787                        tp->tx_desc_mapping,
6788                        (TG3_TX_RING_SIZE <<
6789                         BDINFO_FLAGS_MAXLEN_SHIFT),
6790                        NIC_SRAM_TX_BUFFER_DESC);
6791
6792         /* There is only one receive return ring on 5705/5750, no need
6793          * to explicitly disable the others.
6794          */
6795         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6796                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6797                      i += TG3_BDINFO_SIZE) {
6798                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6799                                       BDINFO_FLAGS_DISABLED);
6800                 }
6801         }
6802
6803         tp->rx_rcb_ptr = 0;
6804         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6805
6806         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6807                        tp->rx_rcb_mapping,
6808                        (TG3_RX_RCB_RING_SIZE(tp) <<
6809                         BDINFO_FLAGS_MAXLEN_SHIFT),
6810                        0);
6811
6812         tp->rx_std_ptr = tp->rx_pending;
6813         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6814                      tp->rx_std_ptr);
6815
6816         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6817                                                 tp->rx_jumbo_pending : 0;
6818         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6819                      tp->rx_jumbo_ptr);
6820
6821         /* Initialize MAC address and backoff seed. */
6822         __tg3_set_mac_addr(tp, 0);
6823
6824         /* MTU + ethernet header + FCS + optional VLAN tag */
6825         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6826
6827         /* The slot time is changed by tg3_setup_phy if we
6828          * run at gigabit with half duplex.
6829          */
6830         tw32(MAC_TX_LENGTHS,
6831              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6832              (6 << TX_LENGTHS_IPG_SHIFT) |
6833              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6834
6835         /* Receive rules. */
6836         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6837         tw32(RCVLPC_CONFIG, 0x0181);
6838
6839         /* Calculate RDMAC_MODE setting early, we need it to determine
6840          * the RCVLPC_STATE_ENABLE mask.
6841          */
6842         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6843                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6844                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6845                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6846                       RDMAC_MODE_LNGREAD_ENAB);
6847
6848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6849                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6850                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6851                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6852
6853         /* If statement applies to 5705 and 5750 PCI devices only */
6854         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6855              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6856             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6857                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6858                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6859                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6860                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6861                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6862                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6863                 }
6864         }
6865
6866         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6867                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6868
6869         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6870                 rdmac_mode |= (1 << 27);
6871
6872         /* Receive/send statistics. */
6873         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6874                 val = tr32(RCVLPC_STATS_ENABLE);
6875                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6876                 tw32(RCVLPC_STATS_ENABLE, val);
6877         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6878                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6879                 val = tr32(RCVLPC_STATS_ENABLE);
6880                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6881                 tw32(RCVLPC_STATS_ENABLE, val);
6882         } else {
6883                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6884         }
6885         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6886         tw32(SNDDATAI_STATSENAB, 0xffffff);
6887         tw32(SNDDATAI_STATSCTRL,
6888              (SNDDATAI_SCTRL_ENABLE |
6889               SNDDATAI_SCTRL_FASTUPD));
6890
6891         /* Setup host coalescing engine. */
6892         tw32(HOSTCC_MODE, 0);
6893         for (i = 0; i < 2000; i++) {
6894                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6895                         break;
6896                 udelay(10);
6897         }
6898
6899         __tg3_set_coalesce(tp, &tp->coal);
6900
6901         /* set status block DMA address */
6902         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6903              ((u64) tp->status_mapping >> 32));
6904         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6905              ((u64) tp->status_mapping & 0xffffffff));
6906
6907         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6908                 /* Status/statistics block address.  See tg3_timer,
6909                  * the tg3_periodic_fetch_stats call there, and
6910                  * tg3_get_stats to see how this works for 5705/5750 chips.
6911                  */
6912                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6913                      ((u64) tp->stats_mapping >> 32));
6914                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6915                      ((u64) tp->stats_mapping & 0xffffffff));
6916                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6917                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6918         }
6919
6920         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6921
6922         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6923         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6924         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6925                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6926
6927         /* Clear statistics/status block in chip, and status block in ram. */
6928         for (i = NIC_SRAM_STATS_BLK;
6929              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6930              i += sizeof(u32)) {
6931                 tg3_write_mem(tp, i, 0);
6932                 udelay(40);
6933         }
6934         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6935
6936         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6937                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6938                 /* reset to prevent losing 1st rx packet intermittently */
6939                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6940                 udelay(10);
6941         }
6942
6943         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6944                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6945         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6946             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6947             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6948                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6949         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6950         udelay(40);
6951
6952         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6953          * If TG3_FLG2_IS_NIC is zero, we should read the
6954          * register to preserve the GPIO settings for LOMs. The GPIOs,
6955          * whether used as inputs or outputs, are set by boot code after
6956          * reset.
6957          */
6958         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6959                 u32 gpio_mask;
6960
6961                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6962                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6963                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6964
6965                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6966                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6967                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6968
6969                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6970                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6971
6972                 tp->grc_local_ctrl &= ~gpio_mask;
6973                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6974
6975                 /* GPIO1 must be driven high for eeprom write protect */
6976                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6977                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6978                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6979         }
6980         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6981         udelay(100);
6982
6983         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6984         tp->last_tag = 0;
6985
6986         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6987                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6988                 udelay(40);
6989         }
6990
6991         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6992                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6993                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6994                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6995                WDMAC_MODE_LNGREAD_ENAB);
6996
6997         /* If statement applies to 5705 and 5750 PCI devices only */
6998         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6999              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7001                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7002                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7003                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7004                         /* nothing */
7005                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7006                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7007                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7008                         val |= WDMAC_MODE_RX_ACCEL;
7009                 }
7010         }
7011
7012         /* Enable host coalescing bug fix */
7013         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7014             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7015             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7016             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7017                 val |= (1 << 29);
7018
7019         tw32_f(WDMAC_MODE, val);
7020         udelay(40);
7021
7022         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7023                 u16 pcix_cmd;
7024
7025                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7026                                      &pcix_cmd);
7027                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7028                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7029                         pcix_cmd |= PCI_X_CMD_READ_2K;
7030                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7031                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7032                         pcix_cmd |= PCI_X_CMD_READ_2K;
7033                 }
7034                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7035                                       pcix_cmd);
7036         }
7037
7038         tw32_f(RDMAC_MODE, rdmac_mode);
7039         udelay(40);
7040
7041         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7042         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7043                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7044
7045         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7046                 tw32(SNDDATAC_MODE,
7047                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7048         else
7049                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7050
7051         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7052         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7053         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7054         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7055         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7056                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7057         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7058         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7059
7060         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7061                 err = tg3_load_5701_a0_firmware_fix(tp);
7062                 if (err)
7063                         return err;
7064         }
7065
7066         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7067                 err = tg3_load_tso_firmware(tp);
7068                 if (err)
7069                         return err;
7070         }
7071
7072         tp->tx_mode = TX_MODE_ENABLE;
7073         tw32_f(MAC_TX_MODE, tp->tx_mode);
7074         udelay(100);
7075
7076         tp->rx_mode = RX_MODE_ENABLE;
7077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7078             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7079                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7080
7081         tw32_f(MAC_RX_MODE, tp->rx_mode);
7082         udelay(10);
7083
7084         if (tp->link_config.phy_is_low_power) {
7085                 tp->link_config.phy_is_low_power = 0;
7086                 tp->link_config.speed = tp->link_config.orig_speed;
7087                 tp->link_config.duplex = tp->link_config.orig_duplex;
7088                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7089         }
7090
7091         tp->mi_mode = MAC_MI_MODE_BASE;
7092         tw32_f(MAC_MI_MODE, tp->mi_mode);
7093         udelay(80);
7094
7095         tw32(MAC_LED_CTRL, tp->led_ctrl);
7096
7097         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7098         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7099                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7100                 udelay(10);
7101         }
7102         tw32_f(MAC_RX_MODE, tp->rx_mode);
7103         udelay(10);
7104
7105         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7106                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7107                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7108                         /* Set drive transmission level to 1.2V  */
7109                         /* only if the signal pre-emphasis bit is not set  */
7110                         val = tr32(MAC_SERDES_CFG);
7111                         val &= 0xfffff000;
7112                         val |= 0x880;
7113                         tw32(MAC_SERDES_CFG, val);
7114                 }
7115                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7116                         tw32(MAC_SERDES_CFG, 0x616000);
7117         }
7118
7119         /* Prevent chip from dropping frames when flow control
7120          * is enabled.
7121          */
7122         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7123
7124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7125             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7126                 /* Use hardware link auto-negotiation */
7127                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7128         }
7129
7130         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7131             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7132                 u32 tmp;
7133
7134                 tmp = tr32(SERDES_RX_CTRL);
7135                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7136                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7137                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7138                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7139         }
7140
7141         err = tg3_setup_phy(tp, 0);
7142         if (err)
7143                 return err;
7144
7145         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7146             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7147                 u32 tmp;
7148
7149                 /* Clear CRC stats. */
7150                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7151                         tg3_writephy(tp, MII_TG3_TEST1,
7152                                      tmp | MII_TG3_TEST1_CRC_EN);
7153                         tg3_readphy(tp, 0x14, &tmp);
7154                 }
7155         }
7156
7157         __tg3_set_rx_mode(tp->dev);
7158
7159         /* Initialize receive rules. */
7160         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7161         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7162         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7163         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7164
7165         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7166             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7167                 limit = 8;
7168         else
7169                 limit = 16;
7170         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7171                 limit -= 4;
7172         switch (limit) {
7173         case 16:
7174                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7175         case 15:
7176                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7177         case 14:
7178                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7179         case 13:
7180                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7181         case 12:
7182                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7183         case 11:
7184                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7185         case 10:
7186                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7187         case 9:
7188                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7189         case 8:
7190                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7191         case 7:
7192                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7193         case 6:
7194                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7195         case 5:
7196                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7197         case 4:
7198                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7199         case 3:
7200                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7201         case 2:
7202         case 1:
7203
7204         default:
7205                 break;
7206         };
7207
7208         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7209                 /* Write our heartbeat update interval to APE. */
7210                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7211                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7212
7213         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7214
7215         return 0;
7216 }
7217
7218 /* Called at device open time to get the chip ready for
7219  * packet processing.  Invoked with tp->lock held.
7220  */
7221 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7222 {
7223         int err;
7224
7225         /* Force the chip into D0. */
7226         err = tg3_set_power_state(tp, PCI_D0);
7227         if (err)
7228                 goto out;
7229
7230         tg3_switch_clocks(tp);
7231
7232         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7233
7234         err = tg3_reset_hw(tp, reset_phy);
7235
7236 out:
7237         return err;
7238 }
7239
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * software statistic PSTAT: add into the low word and, if the sum
 * wrapped (result smaller than the addend), carry into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7246
/* Fold the chip's 32-bit MAC TX/RX statistics registers into the
 * 64-bit software copies in tp->hw_stats via TG3_STAT_ADD32.
 * Called from the once-per-second section of tg3_timer (5705+ chips
 * only); does nothing while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7287
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies
 * (period chosen in tg3_open).  Under tp->lock it works around lost
 * interrupts in non-tagged-status mode, runs the once-per-second link
 * poll and statistics fetch, and sends the ASF firmware heartbeat
 * every two seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* irq_sync set: skip all work this tick, just re-arm. */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* An update is pending but may have been missed:
			 * re-assert the interrupt via GRC local control.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force the coalescing engine to push out a
			 * fresh status block immediately.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly; hand
			 * off to the reset task and ask it to restart
			 * this timer afterwards.  Must drop the lock
			 * before returning without re-arming.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Link state is polled from MAC_STATUS instead
			 * of being interrupt driven.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but link-state change seen, or
			 * carrier down while PCS sync/signal detect is
			 * present: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode bits to
					 * reset the link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware doorbell -- NOTE(review):
			 * bit 14 of GRC_RX_CPU_EVENT appears to signal
			 * the RX CPU; confirm against the register spec.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	/* Re-arm for the next tick. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7407
7408 static int tg3_request_irq(struct tg3 *tp)
7409 {
7410         irq_handler_t fn;
7411         unsigned long flags;
7412         struct net_device *dev = tp->dev;
7413
7414         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7415                 fn = tg3_msi;
7416                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7417                         fn = tg3_msi_1shot;
7418                 flags = IRQF_SAMPLE_RANDOM;
7419         } else {
7420                 fn = tg3_interrupt;
7421                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7422                         fn = tg3_interrupt_tagged;
7423                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7424         }
7425         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7426 }
7427
/* Verify that the chip can actually deliver an interrupt to the host:
 * temporarily swap in the minimal test ISR, force an interrupt through
 * the host coalescing engine, and poll for evidence of delivery before
 * restoring the normal handler.  Returns 0 on success, -EIO if no
 * interrupt was observed, or an error from request_irq()/tg3_request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Replace the production handler with the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll up to ~50ms for a non-zero interrupt mailbox or masked
	 * PCI interrupts -- NOTE(review): presumably the test ISR sets
	 * MISC_HOST_CTRL_MASK_PCI_INT on receipt; confirm against
	 * tg3_test_isr.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Put the production handler back regardless of outcome. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7481
7482 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7483  * successfully restored
7484  */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless we are actually in MSI mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the IRQ; with USING_MSI cleared this installs the
	 * INTx handler (see tg3_request_irq).
	 */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed we cannot keep the handler installed. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7542
/* net_device open handler: power the chip to D0, allocate the DMA
 * rings, enable MSI when supported, initialize the hardware, verify
 * MSI delivery, start the driver watchdog timer and enable interrupts.
 * Every failure path unwinds exactly what was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Force the chip into D0 before touching any registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Watchdog period: 1s with tagged status, 100ms
		 * otherwise (see tg3_timer).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires every second timer-second. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		/* Hardware init failed: tear down IRQ, MSI, rings. */
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* tg3_test_msi() either confirms MSI works or falls
		 * back to INTx itself; a non-zero return means neither
		 * mode could be brought up.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	/* Everything is set up: start the watchdog, mark the device
	 * initialized and let interrupts flow.
	 */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7678
#if 0
/* Compiled-out developer aid: dump PCI state, every major MAC/DMA/
 * host-coalescing block, the NIC-side ring control blocks and
 * descriptors, and the software status/statistics blocks via printk.
 *
 * NOTE(review): txd/rxd below are declared unsigned long but are
 * assigned tp->regs plus an offset and then passed to readl(); if this
 * block is ever re-enabled, confirm the pointer/integer conversions
 * compile cleanly (tp->regs is an ioremapped register base).
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* NIC SRAM copies of the send/receive-return ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7906
7907 static struct net_device_stats *tg3_get_stats(struct net_device *);
7908 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7909
/* Net-core "down" handler: quiesce all software paths, halt the chip,
 * release rings, IRQ and DMA memory, then drop the device into D3hot.
 *
 * Returns 0 unconditionally.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop NAPI polling and any queued reset work before touching
	 * the hardware.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: also wait out any in-flight interrupt handler. */
	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the running totals before tg3_free_consistent()
	 * releases the stats memory, so counters survive close/open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7953
/* Collapse a split high/low 64-bit hardware statistic into the host's
 * native unsigned long: the full high:low value on 64-bit builds, or
 * just the low word when unsigned long cannot hold more.
 */
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
#if (BITS_PER_LONG == 32)
	return val->low;
#else
	return ((u64)val->high << 32) | ((u64)val->low);
#endif
}
7965
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper parts the count is taken from the PHY instead of
 * the MAC statistics block: MII_TG3_TEST1_CRC_EN enables the PHY's CRC
 * counter and register 0x14 supplies the value, which is accumulated
 * into tp->phy_crc_errors (presumably the register clears on read --
 * confirm against the PHY datasheet).  All other chips use the
 * hardware rx_fcs_errors statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access is serialized by tp->lock. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7991
/* Accumulate one ethtool statistic: the total saved at the last close
 * plus the live hardware counter.  Used only by tg3_get_estats().
 */
#define ESTAT_ADD(member) \
	estats->member =        old_estats->member + \
				get_stat64(&hw_stats->member)
7995
/* Fill tp->estats with running ethtool statistics: the totals saved at
 * the last close (tp->estats_prev) plus the live hardware counters.
 * When the hardware stats block is unmapped (device closed), the saved
 * totals are returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Internal DMA/receive-list state counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8083
/* Net-core get_stats handler: fold the live hardware statistics block
 * into tp->net_stats on top of the totals saved at the last close
 * (tp->net_stats_prev).  When the stats block is unmapped (device
 * closed), the previously saved totals are returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Hardware keeps packet counts per cast type; sum them. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts;
	 * calc_crc_errors() hides that distinction.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8143
/* Compute a bit-reflected CRC-32 (polynomial 0xedb88320, initial value
 * all-ones, final inversion) over @len bytes at @buf.  Used to derive
 * the multicast hash-filter bit for an Ethernet address in
 * __tg3_set_rx_mode().
 *
 * The input is only read, so the pointer parameter is const-qualified;
 * existing callers passing non-const buffers are unaffected.
 */
static inline u32 calc_crc(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 0x01;

			reg >>= 1;

			/* Fold in the reflected polynomial whenever the
			 * shifted-out bit was set.
			 */
			if (lsb)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
8168
8169 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8170 {
8171         /* accept or reject all multicast frames */
8172         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8173         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8174         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8175         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8176 }
8177
/* Program the MAC's RX filtering (promiscuous bit, VLAN-tag keeping,
 * multicast hash registers) from dev->flags and the device's multicast
 * list.  Caller holds the full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash bit = low 7 bits of the inverted CRC:
			 * bits 6:5 select one of the four 32-bit hash
			 * registers, bits 4:0 the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch (and flush) the RX_MODE register when the value
	 * actually changed.
	 */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8241
/* Net-core entry point for RX-filter updates: take the full lock and
 * defer to __tg3_set_rx_mode().  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8253
/* Size in bytes of the ethtool register-dump buffer. */
#define TG3_REGDUMP_LEN		(32 * 1024)
8255
/* ethtool get_regs_len: the dump always uses the full fixed-size
 * TG3_REGDUMP_LEN buffer (uncaptured ranges read back as zero).
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8260
/* ethtool get_regs: dump the register ranges listed below into the
 * TG3_REGDUMP_LEN-byte buffer at their native chip offsets.  The
 * buffer is zeroed first, so anything not captured reads as zero.
 * Register access is skipped entirely while phy_is_low_power is set.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Each helper first repositions the output cursor to the register's own
 * offset within the dump, so the buffer layout mirrors the chip's map.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers exist only on parts with real NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8333
/* ethtool get_eeprom_len: size in bytes of the NVRAM as probed. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8340
8341 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8342 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8343 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8344
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in 4-byte words, so the
 * request is split into an unaligned head, whole aligned words, and an
 * unaligned tail.  eeprom->len is advanced by the bytes actually
 * copied, even when a read fails part-way through.
 *
 * Returns 0 on success, -EAGAIN while the PHY is powered down, or the
 * NVRAM read error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the containing word, copy out just the piece asked
		 * for.
		 */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes successfully copied so far. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8404
8405 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8406
/* ethtool set_eeprom: write eeprom->len bytes of NVRAM at
 * eeprom->offset.  NVRAM writes are whole 4-byte words, so a misaligned
 * head or tail is widened by first reading back the neighbouring
 * word(s) and merging the user data into a temporary bounce buffer.
 *
 * Returns 0 on success, -EAGAIN while the PHY is powered down,
 * -EINVAL on a bad magic, -ENOMEM, or the NVRAM access error.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Bounce buffer: preserved head/tail words around the
		 * caller's payload.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8464
/* ethtool get_settings: report supported link modes, port type
 * (TP vs. fibre), current advertising mask and -- only while the
 * interface is up -- the active speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the part is a 10/100-only variant. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8499
8500 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8501 {
8502         struct tg3 *tp = netdev_priv(dev);
8503
8504         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8505                 /* These are the only valid advertisement bits allowed.  */
8506                 if (cmd->autoneg == AUTONEG_ENABLE &&
8507                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8508                                           ADVERTISED_1000baseT_Full |
8509                                           ADVERTISED_Autoneg |
8510                                           ADVERTISED_FIBRE)))
8511                         return -EINVAL;
8512                 /* Fiber can only do SPEED_1000.  */
8513                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8514                          (cmd->speed != SPEED_1000))
8515                         return -EINVAL;
8516         /* Copper cannot force SPEED_1000.  */
8517         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8518                    (cmd->speed == SPEED_1000))
8519                 return -EINVAL;
8520         else if ((cmd->speed == SPEED_1000) &&
8521                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8522                 return -EINVAL;
8523
8524         tg3_full_lock(tp, 0);
8525
8526         tp->link_config.autoneg = cmd->autoneg;
8527         if (cmd->autoneg == AUTONEG_ENABLE) {
8528                 tp->link_config.advertising = (cmd->advertising |
8529                                               ADVERTISED_Autoneg);
8530                 tp->link_config.speed = SPEED_INVALID;
8531                 tp->link_config.duplex = DUPLEX_INVALID;
8532         } else {
8533                 tp->link_config.advertising = 0;
8534                 tp->link_config.speed = cmd->speed;
8535                 tp->link_config.duplex = cmd->duplex;
8536         }
8537
8538         tp->link_config.orig_speed = tp->link_config.speed;
8539         tp->link_config.orig_duplex = tp->link_config.duplex;
8540         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8541
8542         if (netif_running(dev))
8543                 tg3_setup_phy(tp, 1);
8544
8545         tg3_full_unlock(tp);
8546
8547         return 0;
8548 }
8549
/* ethtool get_drvinfo: report driver name, version, firmware revision
 * and PCI bus address.  NOTE(review): the plain strcpy calls assume
 * every source fits the fixed-size ethtool_drvinfo fields -- the
 * name/version strings are short constants; tp->fw_ver is presumed
 * sized to fit (confirm against tg3.h).
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
8559
8560 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8561 {
8562         struct tg3 *tp = netdev_priv(dev);
8563
8564         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8565                 wol->supported = WAKE_MAGIC;
8566         else
8567                 wol->supported = 0;
8568         wol->wolopts = 0;
8569         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8570                 wol->wolopts = WAKE_MAGIC;
8571         memset(&wol->sopass, 0, sizeof(wol->sopass));
8572 }
8573
8574 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8575 {
8576         struct tg3 *tp = netdev_priv(dev);
8577
8578         if (wol->wolopts & ~WAKE_MAGIC)
8579                 return -EINVAL;
8580         if ((wol->wolopts & WAKE_MAGIC) &&
8581             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8582                 return -EINVAL;
8583
8584         spin_lock_bh(&tp->lock);
8585         if (wol->wolopts & WAKE_MAGIC)
8586                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8587         else
8588                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8589         spin_unlock_bh(&tp->lock);
8590
8591         return 0;
8592 }
8593
/* ethtool get_msglevel: return the driver's debug message bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
8599
/* ethtool set_msglevel: store the new debug message bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
8605
/* ethtool set_tso: enable or disable TCP segmentation offload.
 * Chips without TSO capability only accept "off".  HW_TSO_2 parts
 * (except the 5906) also gain IPv6 TSO, and the 5761 additionally TSO
 * of ECN-marked frames.  The common NETIF_F_TSO bookkeeping is left to
 * ethtool_op_set_tso().
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8626
/* ethtool nway_reset: restart link autonegotiation.  Fails with
 * -EAGAIN when the device is down and -EINVAL on SERDES PHYs (no MII
 * autoneg restart there) or when autoneg is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice; presumably the
	 * first read flushes latched state -- confirm against PHY
	 * errata before simplifying.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8653
8654 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8655 {
8656         struct tg3 *tp = netdev_priv(dev);
8657
8658         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8659         ering->rx_mini_max_pending = 0;
8660         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8661                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8662         else
8663                 ering->rx_jumbo_max_pending = 0;
8664
8665         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8666
8667         ering->rx_pending = tp->rx_pending;
8668         ering->rx_mini_pending = 0;
8669         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8670                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8671         else
8672                 ering->rx_jumbo_pending = 0;
8673
8674         ering->tx_pending = tp->tx_pending;
8675 }
8676
/* ethtool set_ringparam: validate and apply new RX/jumbo/TX ring
 * sizes.  The TX ring must hold more than a maximally-fragmented skb
 * (three of them on TSO_BUG chips).  A running device is halted and
 * re-initialized with the new sizes.
 *
 * Returns 0, -EINVAL for out-of-range sizes, or the restart error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can post at most 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8716
8717 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8718 {
8719         struct tg3 *tp = netdev_priv(dev);
8720
8721         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8722
8723         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8724                 epause->rx_pause = 1;
8725         else
8726                 epause->rx_pause = 0;
8727
8728         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8729                 epause->tx_pause = 1;
8730         else
8731                 epause->tx_pause = 0;
8732 }
8733
8734 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8735 {
8736         struct tg3 *tp = netdev_priv(dev);
8737         int irq_sync = 0, err = 0;
8738
8739         if (netif_running(dev)) {
8740                 tg3_netif_stop(tp);
8741                 irq_sync = 1;
8742         }
8743
8744         tg3_full_lock(tp, irq_sync);
8745
8746         if (epause->autoneg)
8747                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8748         else
8749                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8750         if (epause->rx_pause)
8751                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8752         else
8753                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8754         if (epause->tx_pause)
8755                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8756         else
8757                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8758
8759         if (netif_running(dev)) {
8760                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8761                 err = tg3_restart_hw(tp, 1);
8762                 if (!err)
8763                         tg3_netif_start(tp);
8764         }
8765
8766         tg3_full_unlock(tp);
8767
8768         return err;
8769 }
8770
8771 static u32 tg3_get_rx_csum(struct net_device *dev)
8772 {
8773         struct tg3 *tp = netdev_priv(dev);
8774         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8775 }
8776
8777 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8778 {
8779         struct tg3 *tp = netdev_priv(dev);
8780
8781         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8782                 if (data != 0)
8783                         return -EINVAL;
8784                 return 0;
8785         }
8786
8787         spin_lock_bh(&tp->lock);
8788         if (data)
8789                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8790         else
8791                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8792         spin_unlock_bh(&tp->lock);
8793
8794         return 0;
8795 }
8796
8797 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8798 {
8799         struct tg3 *tp = netdev_priv(dev);
8800
8801         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8802                 if (data != 0)
8803                         return -EINVAL;
8804                 return 0;
8805         }
8806
8807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8808             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8809             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8810             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8811                 ethtool_op_set_tx_ipv6_csum(dev, data);
8812         else
8813                 ethtool_op_set_tx_csum(dev, data);
8814
8815         return 0;
8816 }
8817
8818 static int tg3_get_sset_count (struct net_device *dev, int sset)
8819 {
8820         switch (sset) {
8821         case ETH_SS_TEST:
8822                 return TG3_NUM_TEST;
8823         case ETH_SS_STATS:
8824                 return TG3_NUM_STATS;
8825         default:
8826                 return -EOPNOTSUPP;
8827         }
8828 }
8829
8830 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8831 {
8832         switch (stringset) {
8833         case ETH_SS_STATS:
8834                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8835                 break;
8836         case ETH_SS_TEST:
8837                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8838                 break;
8839         default:
8840                 WARN_ON(1);     /* we need a WARN() */
8841                 break;
8842         }
8843 }
8844
8845 static int tg3_phys_id(struct net_device *dev, u32 data)
8846 {
8847         struct tg3 *tp = netdev_priv(dev);
8848         int i;
8849
8850         if (!netif_running(tp->dev))
8851                 return -EAGAIN;
8852
8853         if (data == 0)
8854                 data = UINT_MAX / 2;
8855
8856         for (i = 0; i < (data * 2); i++) {
8857                 if ((i % 2) == 0)
8858                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8859                                            LED_CTRL_1000MBPS_ON |
8860                                            LED_CTRL_100MBPS_ON |
8861                                            LED_CTRL_10MBPS_ON |
8862                                            LED_CTRL_TRAFFIC_OVERRIDE |
8863                                            LED_CTRL_TRAFFIC_BLINK |
8864                                            LED_CTRL_TRAFFIC_LED);
8865
8866                 else
8867                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8868                                            LED_CTRL_TRAFFIC_OVERRIDE);
8869
8870                 if (msleep_interruptible(500))
8871                         break;
8872         }
8873         tw32(MAC_LED_CTRL, tp->led_ctrl);
8874         return 0;
8875 }
8876
/* ethtool get_ethtool_stats handler: refresh the driver statistics via
 * tg3_get_estats() and copy them into the caller-supplied array, which
 * must hold at least sizeof(tp->estats) bytes.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8883
8884 #define NVRAM_TEST_SIZE 0x100
8885 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
8886 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
8887 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
8888 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8889 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8890
/* Self-test: verify the contents of the device's NVRAM/EEPROM.
 *
 * Reads the magic word to determine the image format (standard EEPROM,
 * firmware self-boot format 1 rev 0/2/3, or hardware self-boot), reads
 * the image into a temporary buffer, and validates its checksum or
 * parity bits.  Returns 0 if the image checks out, -EIO on read failure
 * or checksum/parity mismatch, -ENOMEM if the buffer allocation fails.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word selects how many bytes must be read and checked. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can verify. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image, one 32-bit word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* All bytes of a valid image sum to zero (mod 256). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each hold 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 holds 6 parity bits and byte 17
				 * holds 8 more.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have odd total
		 * bit-weight; either mismatch direction fails the test.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9024
9025 #define TG3_SERDES_TIMEOUT_SEC  2
9026 #define TG3_COPPER_TIMEOUT_SEC  6
9027
9028 static int tg3_test_link(struct tg3 *tp)
9029 {
9030         int i, max;
9031
9032         if (!netif_running(tp->dev))
9033                 return -ENODEV;
9034
9035         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9036                 max = TG3_SERDES_TIMEOUT_SEC;
9037         else
9038                 max = TG3_COPPER_TIMEOUT_SEC;
9039
9040         for (i = 0; i < max; i++) {
9041                 if (netif_carrier_ok(tp->dev))
9042                         return 0;
9043
9044                 if (msleep_interruptible(1000))
9045                         break;
9046         }
9047
9048         return -EIO;
9049 }
9050
9051 /* Only test the commonly used registers */
9052 static int tg3_test_registers(struct tg3 *tp)
9053 {
9054         int i, is_5705, is_5750;
9055         u32 offset, read_mask, write_mask, val, save_val, read_val;
9056         static struct {
9057                 u16 offset;
9058                 u16 flags;
9059 #define TG3_FL_5705     0x1
9060 #define TG3_FL_NOT_5705 0x2
9061 #define TG3_FL_NOT_5788 0x4
9062 #define TG3_FL_NOT_5750 0x8
9063                 u32 read_mask;
9064                 u32 write_mask;
9065         } reg_tbl[] = {
9066                 /* MAC Control Registers */
9067                 { MAC_MODE, TG3_FL_NOT_5705,
9068                         0x00000000, 0x00ef6f8c },
9069                 { MAC_MODE, TG3_FL_5705,
9070                         0x00000000, 0x01ef6b8c },
9071                 { MAC_STATUS, TG3_FL_NOT_5705,
9072                         0x03800107, 0x00000000 },
9073                 { MAC_STATUS, TG3_FL_5705,
9074                         0x03800100, 0x00000000 },
9075                 { MAC_ADDR_0_HIGH, 0x0000,
9076                         0x00000000, 0x0000ffff },
9077                 { MAC_ADDR_0_LOW, 0x0000,
9078                         0x00000000, 0xffffffff },
9079                 { MAC_RX_MTU_SIZE, 0x0000,
9080                         0x00000000, 0x0000ffff },
9081                 { MAC_TX_MODE, 0x0000,
9082                         0x00000000, 0x00000070 },
9083                 { MAC_TX_LENGTHS, 0x0000,
9084                         0x00000000, 0x00003fff },
9085                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9086                         0x00000000, 0x000007fc },
9087                 { MAC_RX_MODE, TG3_FL_5705,
9088                         0x00000000, 0x000007dc },
9089                 { MAC_HASH_REG_0, 0x0000,
9090                         0x00000000, 0xffffffff },
9091                 { MAC_HASH_REG_1, 0x0000,
9092                         0x00000000, 0xffffffff },
9093                 { MAC_HASH_REG_2, 0x0000,
9094                         0x00000000, 0xffffffff },
9095                 { MAC_HASH_REG_3, 0x0000,
9096                         0x00000000, 0xffffffff },
9097
9098                 /* Receive Data and Receive BD Initiator Control Registers. */
9099                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9100                         0x00000000, 0xffffffff },
9101                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9102                         0x00000000, 0xffffffff },
9103                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9104                         0x00000000, 0x00000003 },
9105                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9106                         0x00000000, 0xffffffff },
9107                 { RCVDBDI_STD_BD+0, 0x0000,
9108                         0x00000000, 0xffffffff },
9109                 { RCVDBDI_STD_BD+4, 0x0000,
9110                         0x00000000, 0xffffffff },
9111                 { RCVDBDI_STD_BD+8, 0x0000,
9112                         0x00000000, 0xffff0002 },
9113                 { RCVDBDI_STD_BD+0xc, 0x0000,
9114                         0x00000000, 0xffffffff },
9115
9116                 /* Receive BD Initiator Control Registers. */
9117                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9118                         0x00000000, 0xffffffff },
9119                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9120                         0x00000000, 0x000003ff },
9121                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9122                         0x00000000, 0xffffffff },
9123
9124                 /* Host Coalescing Control Registers. */
9125                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9126                         0x00000000, 0x00000004 },
9127                 { HOSTCC_MODE, TG3_FL_5705,
9128                         0x00000000, 0x000000f6 },
9129                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9130                         0x00000000, 0xffffffff },
9131                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9132                         0x00000000, 0x000003ff },
9133                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9134                         0x00000000, 0xffffffff },
9135                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9136                         0x00000000, 0x000003ff },
9137                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9138                         0x00000000, 0xffffffff },
9139                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9140                         0x00000000, 0x000000ff },
9141                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9142                         0x00000000, 0xffffffff },
9143                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9144                         0x00000000, 0x000000ff },
9145                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9146                         0x00000000, 0xffffffff },
9147                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9148                         0x00000000, 0xffffffff },
9149                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9150                         0x00000000, 0xffffffff },
9151                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9152                         0x00000000, 0x000000ff },
9153                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9154                         0x00000000, 0xffffffff },
9155                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9156                         0x00000000, 0x000000ff },
9157                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9158                         0x00000000, 0xffffffff },
9159                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9160                         0x00000000, 0xffffffff },
9161                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9162                         0x00000000, 0xffffffff },
9163                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9164                         0x00000000, 0xffffffff },
9165                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9166                         0x00000000, 0xffffffff },
9167                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9168                         0xffffffff, 0x00000000 },
9169                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9170                         0xffffffff, 0x00000000 },
9171
9172                 /* Buffer Manager Control Registers. */
9173                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9174                         0x00000000, 0x007fff80 },
9175                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9176                         0x00000000, 0x007fffff },
9177                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9178                         0x00000000, 0x0000003f },
9179                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9180                         0x00000000, 0x000001ff },
9181                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9182                         0x00000000, 0x000001ff },
9183                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9184                         0xffffffff, 0x00000000 },
9185                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9186                         0xffffffff, 0x00000000 },
9187
9188                 /* Mailbox Registers */
9189                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9190                         0x00000000, 0x000001ff },
9191                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9192                         0x00000000, 0x000001ff },
9193                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9194                         0x00000000, 0x000007ff },
9195                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9196                         0x00000000, 0x000001ff },
9197
9198                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9199         };
9200
9201         is_5705 = is_5750 = 0;
9202         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9203                 is_5705 = 1;
9204                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9205                         is_5750 = 1;
9206         }
9207
9208         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9209                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9210                         continue;
9211
9212                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9213                         continue;
9214
9215                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9216                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9217                         continue;
9218
9219                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9220                         continue;
9221
9222                 offset = (u32) reg_tbl[i].offset;
9223                 read_mask = reg_tbl[i].read_mask;
9224                 write_mask = reg_tbl[i].write_mask;
9225
9226                 /* Save the original register content */
9227                 save_val = tr32(offset);
9228
9229                 /* Determine the read-only value. */
9230                 read_val = save_val & read_mask;
9231
9232                 /* Write zero to the register, then make sure the read-only bits
9233                  * are not changed and the read/write bits are all zeros.
9234                  */
9235                 tw32(offset, 0);
9236
9237                 val = tr32(offset);
9238
9239                 /* Test the read-only and read/write bits. */
9240                 if (((val & read_mask) != read_val) || (val & write_mask))
9241                         goto out;
9242
9243                 /* Write ones to all the bits defined by RdMask and WrMask, then
9244                  * make sure the read-only bits are not changed and the
9245                  * read/write bits are all ones.
9246                  */
9247                 tw32(offset, read_mask | write_mask);
9248
9249                 val = tr32(offset);
9250
9251                 /* Test the read-only bits. */
9252                 if ((val & read_mask) != read_val)
9253                         goto out;
9254
9255                 /* Test the read/write bits. */
9256                 if ((val & write_mask) != write_mask)
9257                         goto out;
9258
9259                 tw32(offset, save_val);
9260         }
9261
9262         return 0;
9263
9264 out:
9265         if (netif_msg_hw(tp))
9266                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9267                        offset);
9268         tw32(offset, save_val);
9269         return -EIO;
9270 }
9271
9272 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9273 {
9274         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9275         int i;
9276         u32 j;
9277
9278         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9279                 for (j = 0; j < len; j += 4) {
9280                         u32 val;
9281
9282                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9283                         tg3_read_mem(tp, offset + j, &val);
9284                         if (val != test_pattern[i])
9285                                 return -EIO;
9286                 }
9287         }
9288         return 0;
9289 }
9290
9291 static int tg3_test_memory(struct tg3 *tp)
9292 {
9293         static struct mem_entry {
9294                 u32 offset;
9295                 u32 len;
9296         } mem_tbl_570x[] = {
9297                 { 0x00000000, 0x00b50},
9298                 { 0x00002000, 0x1c000},
9299                 { 0xffffffff, 0x00000}
9300         }, mem_tbl_5705[] = {
9301                 { 0x00000100, 0x0000c},
9302                 { 0x00000200, 0x00008},
9303                 { 0x00004000, 0x00800},
9304                 { 0x00006000, 0x01000},
9305                 { 0x00008000, 0x02000},
9306                 { 0x00010000, 0x0e000},
9307                 { 0xffffffff, 0x00000}
9308         }, mem_tbl_5755[] = {
9309                 { 0x00000200, 0x00008},
9310                 { 0x00004000, 0x00800},
9311                 { 0x00006000, 0x00800},
9312                 { 0x00008000, 0x02000},
9313                 { 0x00010000, 0x0c000},
9314                 { 0xffffffff, 0x00000}
9315         }, mem_tbl_5906[] = {
9316                 { 0x00000200, 0x00008},
9317                 { 0x00004000, 0x00400},
9318                 { 0x00006000, 0x00400},
9319                 { 0x00008000, 0x01000},
9320                 { 0x00010000, 0x01000},
9321                 { 0xffffffff, 0x00000}
9322         };
9323         struct mem_entry *mem_tbl;
9324         int err = 0;
9325         int i;
9326
9327         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9328                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9329                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9330                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9331                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9332                         mem_tbl = mem_tbl_5755;
9333                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9334                         mem_tbl = mem_tbl_5906;
9335                 else
9336                         mem_tbl = mem_tbl_5705;
9337         } else
9338                 mem_tbl = mem_tbl_570x;
9339
9340         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9341                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9342                     mem_tbl[i].len)) != 0)
9343                         break;
9344         }
9345
9346         return err;
9347 }
9348
9349 #define TG3_MAC_LOOPBACK        0
9350 #define TG3_PHY_LOOPBACK        1
9351
/* Self-test: transmit one frame in internal loopback mode and verify that
 * it is received intact.
 *
 * loopback_mode selects TG3_MAC_LOOPBACK (loop inside the MAC) or
 * TG3_PHY_LOOPBACK (loop inside the PHY).  A 1514-byte frame with a known
 * byte pattern is queued on the TX ring, then the status block is polled
 * for TX-complete and RX-arrival; the received payload is compared
 * byte-for-byte.  Returns 0 on success, -EIO on any mismatch or timeout,
 * -ENOMEM if the skb allocation fails, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* NOTE(review): 0x1b appears to be a shadow register
			 * reached via MII_TG3_EPHY_SHADOW_EN; bit 0x20 is
			 * cleared here — exact semantics not visible in this
			 * file, confirm against the 5906 PHY documentation.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY into loopback at the chosen speed. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: destination = our own MAC, zeroed source/
	 * type area, then an incrementing byte pattern in the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer was before we send. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell; the read flushes the posted write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Fail if the frame was never transmitted or never received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX completion descriptor before touching the data. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length excludes the 4-byte FCS. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload against the pattern we sent. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9519
/* Result bits returned by tg3_test_loopback(); a set bit means that
 * flavor of loopback test failed.
 */
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
					 TG3_PHY_LOOPBACK_FAILED)
9524
/* Run the MAC-level and, for non-SERDES devices, PHY-level loopback
 * self-tests.  Returns 0 on success, or a mask of
 * TG3_{MAC,PHY}_LOOPBACK_FAILED bits identifying the failed tests.
 * The chip is reset via tg3_reset_hw() before testing; the caller is
 * responsible for restoring normal operation afterwards.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		int i;
		u32 status;

		/* These chips require the hardware CPMU mutex before the
		 * driver may modify TG3_CPMU_CTRL.
		 */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
					  CPMU_CTRL_LINK_AWARE_MODE));
		else
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_LINK_AWARE_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		/* Restore the saved CPMU power-management setting. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is skipped on SERDES devices (no copper PHY). */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9585
/* ethtool self-test entry point.  Fills data[] with one slot per test
 * (0 = pass, nonzero = fail) and sets ETH_TEST_FL_FAILED on any failure:
 *   data[0] NVRAM     data[1] link       data[2] registers
 *   data[3] memory    data[4] loopback   data[5] interrupt
 * The register/memory/loopback tests are destructive (they halt and
 * reinitialize the chip), so they only run when ETH_TEST_FL_OFFLINE
 * was requested.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the device if it is currently in a low-power state. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and halt the on-chip CPUs before the
		 * destructive register/memory tests.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] records the full loopback failure bitmask. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Drop the lock: the interrupt test manages it itself. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore the chip to its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return to low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9658
9659 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9660 {
9661         struct mii_ioctl_data *data = if_mii(ifr);
9662         struct tg3 *tp = netdev_priv(dev);
9663         int err;
9664
9665         switch(cmd) {
9666         case SIOCGMIIPHY:
9667                 data->phy_id = PHY_ADDR;
9668
9669                 /* fallthru */
9670         case SIOCGMIIREG: {
9671                 u32 mii_regval;
9672
9673                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9674                         break;                  /* We have no PHY */
9675
9676                 if (tp->link_config.phy_is_low_power)
9677                         return -EAGAIN;
9678
9679                 spin_lock_bh(&tp->lock);
9680                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9681                 spin_unlock_bh(&tp->lock);
9682
9683                 data->val_out = mii_regval;
9684
9685                 return err;
9686         }
9687
9688         case SIOCSMIIREG:
9689                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9690                         break;                  /* We have no PHY */
9691
9692                 if (!capable(CAP_NET_ADMIN))
9693                         return -EPERM;
9694
9695                 if (tp->link_config.phy_is_low_power)
9696                         return -EAGAIN;
9697
9698                 spin_lock_bh(&tp->lock);
9699                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9700                 spin_unlock_bh(&tp->lock);
9701
9702                 return err;
9703
9704         default:
9705                 /* do nothing */
9706                 break;
9707         }
9708         return -EOPNOTSUPP;
9709 }
9710
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * RX mode so the chip keeps or strips VLAN tags accordingly.  The
 * interface is quiesced around the update to avoid racing the RX path.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9732
9733 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9734 {
9735         struct tg3 *tp = netdev_priv(dev);
9736
9737         memcpy(ec, &tp->coal, sizeof(*ec));
9738         return 0;
9739 }
9740
9741 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9742 {
9743         struct tg3 *tp = netdev_priv(dev);
9744         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9745         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9746
9747         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9748                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9749                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9750                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9751                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9752         }
9753
9754         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9755             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9756             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9757             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9758             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9759             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9760             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9761             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9762             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9763             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9764                 return -EINVAL;
9765
9766         /* No rx interrupts will be generated if both are zero */
9767         if ((ec->rx_coalesce_usecs == 0) &&
9768             (ec->rx_max_coalesced_frames == 0))
9769                 return -EINVAL;
9770
9771         /* No tx interrupts will be generated if both are zero */
9772         if ((ec->tx_coalesce_usecs == 0) &&
9773             (ec->tx_max_coalesced_frames == 0))
9774                 return -EINVAL;
9775
9776         /* Only copy relevant parameters, ignore all others. */
9777         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9778         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9779         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9780         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9781         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9782         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9783         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9784         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9785         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9786
9787         if (netif_running(dev)) {
9788                 tg3_full_lock(tp, 0);
9789                 __tg3_set_coalesce(tp, &tp->coal);
9790                 tg3_full_unlock(tp);
9791         }
9792         return 0;
9793 }
9794
/* ethtool method table; installed on the net_device at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9827
9828 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9829 {
9830         u32 cursize, val, magic;
9831
9832         tp->nvram_size = EEPROM_CHIP_SIZE;
9833
9834         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9835                 return;
9836
9837         if ((magic != TG3_EEPROM_MAGIC) &&
9838             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9839             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9840                 return;
9841
9842         /*
9843          * Size the chip by reading offsets at increasing powers of two.
9844          * When we encounter our validation signature, we know the addressing
9845          * has wrapped around, and thus have our chip size.
9846          */
9847         cursize = 0x10;
9848
9849         while (cursize < tp->nvram_size) {
9850                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9851                         return;
9852
9853                 if (val == magic)
9854                         break;
9855
9856                 cursize <<= 1;
9857         }
9858
9859         tp->nvram_size = cursize;
9860 }
9861
9862 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9863 {
9864         u32 val;
9865
9866         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9867                 return;
9868
9869         /* Selfboot format */
9870         if (val != TG3_EEPROM_MAGIC) {
9871                 tg3_get_eeprom_size(tp);
9872                 return;
9873         }
9874
9875         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9876                 if (val != 0) {
9877                         tp->nvram_size = (val >> 16) * 1024;
9878                         return;
9879                 }
9880         }
9881         tp->nvram_size = 0x80000;
9882 }
9883
9884 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9885 {
9886         u32 nvcfg1;
9887
9888         nvcfg1 = tr32(NVRAM_CFG1);
9889         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9890                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9891         }
9892         else {
9893                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9894                 tw32(NVRAM_CFG1, nvcfg1);
9895         }
9896
9897         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9898             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9899                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9900                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9901                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9902                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9903                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9904                                 break;
9905                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9906                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9907                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9908                                 break;
9909                         case FLASH_VENDOR_ATMEL_EEPROM:
9910                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9911                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9912                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9913                                 break;
9914                         case FLASH_VENDOR_ST:
9915                                 tp->nvram_jedecnum = JEDEC_ST;
9916                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9917                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9918                                 break;
9919                         case FLASH_VENDOR_SAIFUN:
9920                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9921                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9922                                 break;
9923                         case FLASH_VENDOR_SST_SMALL:
9924                         case FLASH_VENDOR_SST_LARGE:
9925                                 tp->nvram_jedecnum = JEDEC_SST;
9926                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9927                                 break;
9928                 }
9929         }
9930         else {
9931                 tp->nvram_jedecnum = JEDEC_ATMEL;
9932                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9933                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9934         }
9935 }
9936
/* Determine NVRAM vendor, buffering and page size for 5752 devices
 * from the NVRAM_CFG1 strap register.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts: decode the page-size strap bits. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9997
/* Determine NVRAM properties for 5755 devices.  The reported size is
 * reduced when the TPM protection bit reserves part of the flash.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 : 0x80000);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x1f200 : 0x20000);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ? 0x10000 : 0x20000);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ? 0x10000 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x20000 : 0x80000);
			break;
	}
}
10044
10045 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10046 {
10047         u32 nvcfg1;
10048
10049         nvcfg1 = tr32(NVRAM_CFG1);
10050
10051         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10052                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10053                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10054                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10055                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10056                         tp->nvram_jedecnum = JEDEC_ATMEL;
10057                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10058                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10059
10060                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10061                         tw32(NVRAM_CFG1, nvcfg1);
10062                         break;
10063                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10064                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10065                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10066                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10067                         tp->nvram_jedecnum = JEDEC_ATMEL;
10068                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10069                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10070                         tp->nvram_pagesize = 264;
10071                         break;
10072                 case FLASH_5752VENDOR_ST_M45PE10:
10073                 case FLASH_5752VENDOR_ST_M45PE20:
10074                 case FLASH_5752VENDOR_ST_M45PE40:
10075                         tp->nvram_jedecnum = JEDEC_ST;
10076                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10077                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10078                         tp->nvram_pagesize = 256;
10079                         break;
10080         }
10081 }
10082
/* Determine NVRAM properties for 5761 devices.  When TPM protection is
 * enabled the accessible size is taken from the lockout register;
 * otherwise it is implied by the flash part identified in NVRAM_CFG1.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		/* Protected: size is whatever lies below the lockout. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: full part size by flash type. */
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = 0x100000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = 0x80000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = 0x40000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = 0x20000;
				break;
		}
	}
}
10157
10158 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10159 {
10160         tp->nvram_jedecnum = JEDEC_ATMEL;
10161         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10162         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10163 }
10164
10165 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10166 static void __devinit tg3_nvram_init(struct tg3 *tp)
10167 {
10168         tw32_f(GRC_EEPROM_ADDR,
10169              (EEPROM_ADDR_FSM_RESET |
10170               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10171                EEPROM_ADDR_CLKPERD_SHIFT)));
10172
10173         msleep(1);
10174
10175         /* Enable seeprom accesses. */
10176         tw32_f(GRC_LOCAL_CTRL,
10177              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10178         udelay(100);
10179
10180         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10181             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10182                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10183
10184                 if (tg3_nvram_lock(tp)) {
10185                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10186                                "tg3_nvram_init failed.\n", tp->dev->name);
10187                         return;
10188                 }
10189                 tg3_enable_nvram_access(tp);
10190
10191                 tp->nvram_size = 0;
10192
10193                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10194                         tg3_get_5752_nvram_info(tp);
10195                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10196                         tg3_get_5755_nvram_info(tp);
10197                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10198                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10199                         tg3_get_5787_nvram_info(tp);
10200                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10201                         tg3_get_5761_nvram_info(tp);
10202                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10203                         tg3_get_5906_nvram_info(tp);
10204                 else
10205                         tg3_get_nvram_info(tp);
10206
10207                 if (tp->nvram_size == 0)
10208                         tg3_get_nvram_size(tp);
10209
10210                 tg3_disable_nvram_access(tp);
10211                 tg3_nvram_unlock(tp);
10212
10213         } else {
10214                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10215
10216                 tg3_get_eeprom_size(tp);
10217         }
10218 }
10219
/* Read one 32-bit word from the serial EEPROM through the GRC EEPROM
 * state machine (used on chips without an NVRAM interface).
 * @offset: byte offset, must be word aligned and within the
 *          EEPROM_ADDR_ADDR_MASK range.
 * Returns 0 with the word stored in *val, -EINVAL for a bad offset,
 * or -EBUSY if the state machine never signalled completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve non-address bits, then kick off a read transaction. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10253
10254 #define NVRAM_CMD_TIMEOUT 10000
10255
10256 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10257 {
10258         int i;
10259
10260         tw32(NVRAM_CMD, nvram_cmd);
10261         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10262                 udelay(10);
10263                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10264                         udelay(10);
10265                         break;
10266                 }
10267         }
10268         if (i == NVRAM_CMD_TIMEOUT) {
10269                 return -EBUSY;
10270         }
10271         return 0;
10272 }
10273
10274 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10275 {
10276         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10277             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10278             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10279            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10280             (tp->nvram_jedecnum == JEDEC_ATMEL))
10281
10282                 addr = ((addr / tp->nvram_pagesize) <<
10283                         ATMEL_AT45DB0X1B_PAGE_POS) +
10284                        (addr % tp->nvram_pagesize);
10285
10286         return addr;
10287 }
10288
10289 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10290 {
10291         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10292             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10293             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10294            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10295             (tp->nvram_jedecnum == JEDEC_ATMEL))
10296
10297                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10298                         tp->nvram_pagesize) +
10299                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10300
10301         return addr;
10302 }
10303
/* Read one 32-bit word from NVRAM at logical @offset, falling back to
 * the serial-EEPROM path when no NVRAM flash is present.  Takes the
 * NVRAM hardware lock and enables access for the duration of the read.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical addressing scheme. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* The data register holds byte-swapped data; undo the swap. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10335
10336 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10337 {
10338         u32 v;
10339         int res = tg3_nvram_read(tp, offset, &v);
10340         if (!res)
10341                 *val = cpu_to_le32(v);
10342         return res;
10343 }
10344
10345 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10346 {
10347         int err;
10348         u32 tmp;
10349
10350         err = tg3_nvram_read(tp, offset, &tmp);
10351         *val = swab32(tmp);
10352         return err;
10353 }
10354
/* Write @len bytes (dword aligned) from @buf to the serial EEPROM at
 * @offset, one 32-bit word at a time through the GRC EEPROM interface.
 * Each word write is polled up to ~1 s.  Returns 0 on success or
 * -EBUSY if a word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* Acknowledge any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1 s for this word's write to finish. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10397
10398 /* offset and length are dword aligned */
10399 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10400                 u8 *buf)
10401 {
10402         int ret = 0;
10403         u32 pagesize = tp->nvram_pagesize;
10404         u32 pagemask = pagesize - 1;
10405         u32 nvram_cmd;
10406         u8 *tmp;
10407
10408         tmp = kmalloc(pagesize, GFP_KERNEL);
10409         if (tmp == NULL)
10410                 return -ENOMEM;
10411
10412         while (len) {
10413                 int j;
10414                 u32 phy_addr, page_off, size;
10415
10416                 phy_addr = offset & ~pagemask;
10417
10418                 for (j = 0; j < pagesize; j += 4) {
10419                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10420                                                 (__le32 *) (tmp + j))))
10421                                 break;
10422                 }
10423                 if (ret)
10424                         break;
10425
10426                 page_off = offset & pagemask;
10427                 size = pagesize;
10428                 if (len < size)
10429                         size = len;
10430
10431                 len -= size;
10432
10433                 memcpy(tmp + page_off, buf, size);
10434
10435                 offset = offset + (pagesize - page_off);
10436
10437                 tg3_enable_nvram_access(tp);
10438
10439                 /*
10440                  * Before we can erase the flash page, we need
10441                  * to issue a special "write enable" command.
10442                  */
10443                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10444
10445                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10446                         break;
10447
10448                 /* Erase the target page */
10449                 tw32(NVRAM_ADDR, phy_addr);
10450
10451                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10452                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10453
10454                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10455                         break;
10456
10457                 /* Issue another write enable to start the write. */
10458                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10459
10460                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10461                         break;
10462
10463                 for (j = 0; j < pagesize; j += 4) {
10464                         __be32 data;
10465
10466                         data = *((__be32 *) (tmp + j));
10467                         /* swab32(le32_to_cpu(data)), actually */
10468                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
10469
10470                         tw32(NVRAM_ADDR, phy_addr + j);
10471
10472                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10473                                 NVRAM_CMD_WR;
10474
10475                         if (j == 0)
10476                                 nvram_cmd |= NVRAM_CMD_FIRST;
10477                         else if (j == (pagesize - 4))
10478                                 nvram_cmd |= NVRAM_CMD_LAST;
10479
10480                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10481                                 break;
10482                 }
10483                 if (ret)
10484                         break;
10485         }
10486
10487         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10488         tg3_nvram_exec_cmd(tp, nvram_cmd);
10489
10490         kfree(tmp);
10491
10492         return ret;
10493 }
10494
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash (or plain EEPROM) at
 * @offset, one dword per NVRAM command.  FIRST/LAST framing is applied
 * at page and transfer boundaries; ST parts on pre-5752-family ASICs
 * additionally need an explicit write-enable before each FIRST command.
 * Returns 0 on success or a negative errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Start of a page, or start of the whole transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* Last dword of a page. */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Last dword of the whole transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10548
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily deasserts the
 * write-protect GPIO, dispatches to the EEPROM, buffered-flash or
 * unbuffered-flash writer, then restores protection.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop the write-protect GPIO for the duration of the write. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* NOTE(review): returning here leaves the write-protect
		 * GPIO deasserted — confirm whether that is intentional.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the write-protect GPIO. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
10603
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY ID
 * fitted on that board; consulted by tg3_phy_probe() when the PHY
 * cannot be identified from the chip or the EEPROM.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* 0 => board treated as serdes (no copper PHY) */
};
10608
/* Hard-coded board table used as a last resort by tg3_phy_probe():
 * boards are matched by PCI subsystem IDs; entries with phy_id 0 are
 * flagged as serdes in the probe path.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10646
10647 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10648 {
10649         int i;
10650
10651         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10652                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10653                      tp->pdev->subsystem_vendor) &&
10654                     (subsys_id_to_phy_id[i].subsys_devid ==
10655                      tp->pdev->subsystem_device))
10656                         return &subsys_id_to_phy_id[i];
10657         }
10658         return NULL;
10659 }
10660
10661 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10662 {
10663         u32 val;
10664         u16 pmcsr;
10665
10666         /* On some early chips the SRAM cannot be accessed in D3hot state,
10667          * so need make sure we're in D0.
10668          */
10669         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10670         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10671         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10672         msleep(1);
10673
10674         /* Make sure register accesses (indirect or otherwise)
10675          * will function correctly.
10676          */
10677         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10678                                tp->misc_host_ctrl);
10679
10680         /* The memory arbiter has to be enabled in order for SRAM accesses
10681          * to succeed.  Normally on powerup the tg3 chip firmware will make
10682          * sure it is enabled, but other entities such as system netboot
10683          * code might disable it.
10684          */
10685         val = tr32(MEMARB_MODE);
10686         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10687
10688         tp->phy_id = PHY_ID_INVALID;
10689         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10690
10691         /* Assume an onboard device and WOL capable by default.  */
10692         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10693
10694         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10695                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10696                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10697                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10698                 }
10699                 val = tr32(VCPU_CFGSHDW);
10700                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10701                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10702                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10703                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10704                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10705                 return;
10706         }
10707
10708         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10709         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10710                 u32 nic_cfg, led_cfg;
10711                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10712                 int eeprom_phy_serdes = 0;
10713
10714                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10715                 tp->nic_sram_data_cfg = nic_cfg;
10716
10717                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10718                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10719                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10720                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10721                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10722                     (ver > 0) && (ver < 0x100))
10723                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10724
10725                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10726                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10727                         eeprom_phy_serdes = 1;
10728
10729                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10730                 if (nic_phy_id != 0) {
10731                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10732                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10733
10734                         eeprom_phy_id  = (id1 >> 16) << 10;
10735                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10736                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10737                 } else
10738                         eeprom_phy_id = 0;
10739
10740                 tp->phy_id = eeprom_phy_id;
10741                 if (eeprom_phy_serdes) {
10742                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10743                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10744                         else
10745                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10746                 }
10747
10748                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10749                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10750                                     SHASTA_EXT_LED_MODE_MASK);
10751                 else
10752                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10753
10754                 switch (led_cfg) {
10755                 default:
10756                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10757                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10758                         break;
10759
10760                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10761                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10762                         break;
10763
10764                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10765                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10766
10767                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10768                          * read on some older 5700/5701 bootcode.
10769                          */
10770                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10771                             ASIC_REV_5700 ||
10772                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10773                             ASIC_REV_5701)
10774                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10775
10776                         break;
10777
10778                 case SHASTA_EXT_LED_SHARED:
10779                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10780                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10781                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10782                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10783                                                  LED_CTRL_MODE_PHY_2);
10784                         break;
10785
10786                 case SHASTA_EXT_LED_MAC:
10787                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10788                         break;
10789
10790                 case SHASTA_EXT_LED_COMBO:
10791                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10792                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10793                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10794                                                  LED_CTRL_MODE_PHY_2);
10795                         break;
10796
10797                 };
10798
10799                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10800                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10801                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10802                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10803
10804                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10805                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10806
10807                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10808                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10809                         if ((tp->pdev->subsystem_vendor ==
10810                              PCI_VENDOR_ID_ARIMA) &&
10811                             (tp->pdev->subsystem_device == 0x205a ||
10812                              tp->pdev->subsystem_device == 0x2063))
10813                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10814                 } else {
10815                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10816                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10817                 }
10818
10819                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10820                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10821                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10822                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10823                 }
10824                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10825                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10826                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10827                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10828                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10829
10830                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10831                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10832                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10833
10834                 if (cfg2 & (1 << 17))
10835                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10836
10837                 /* serdes signal pre-emphasis in register 0x590 set by */
10838                 /* bootcode if bit 18 is set */
10839                 if (cfg2 & (1 << 18))
10840                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10841
10842                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10843                         u32 cfg3;
10844
10845                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10846                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10847                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10848                 }
10849         }
10850 }
10851
10852 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10853 {
10854         int i;
10855         u32 val;
10856
10857         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10858         tw32(OTP_CTRL, cmd);
10859
10860         /* Wait for up to 1 ms for command to execute. */
10861         for (i = 0; i < 100; i++) {
10862                 val = tr32(OTP_STATUS);
10863                 if (val & OTP_STATUS_CMD_DONE)
10864                         break;
10865                 udelay(10);
10866         }
10867
10868         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10869 }
10870
10871 /* Read the gphy configuration from the OTP region of the chip.  The gphy
10872  * configuration is a 32-bit value that straddles the alignment boundary.
10873  * We do two 32-bit reads and then shift and merge the results.
10874  */
10875 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10876 {
10877         u32 bhalf_otp, thalf_otp;
10878
10879         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10880
10881         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
10882                 return 0;
10883
10884         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10885
10886         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10887                 return 0;
10888
10889         thalf_otp = tr32(OTP_READ_DATA);
10890
10891         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10892
10893         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10894                 return 0;
10895
10896         bhalf_otp = tr32(OTP_READ_DATA);
10897
10898         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
10899 }
10900
/* Identify the PHY fitted to this device and record its ID in
 * tp->phy_id, setting the serdes flags as appropriate.  Falls back to
 * the EEPROM-provided ID and then to the hard-coded subsystem-ID table
 * when the PHY cannot be probed directly.  For copper PHYs without
 * ASF/APE, may also reset the PHY and restore full advertisement.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice; presumably to clear the latched
		 * link bit so the second read reflects current state —
		 * TODO confirm against MII latched-low semantics.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		/* Restart autoneg only if the current advertisement is
		 * narrower than the full mask.
		 */
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when the call above succeeded this runs the 5401
	 * DSP init a second time — confirm whether the repetition is
	 * intentional.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11028
11029 static void __devinit tg3_read_partno(struct tg3 *tp)
11030 {
11031         unsigned char vpd_data[256];
11032         unsigned int i;
11033         u32 magic;
11034
11035         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11036                 goto out_not_found;
11037
11038         if (magic == TG3_EEPROM_MAGIC) {
11039                 for (i = 0; i < 256; i += 4) {
11040                         u32 tmp;
11041
11042                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11043                                 goto out_not_found;
11044
11045                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11046                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11047                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11048                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11049                 }
11050         } else {
11051                 int vpd_cap;
11052
11053                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11054                 for (i = 0; i < 256; i += 4) {
11055                         u32 tmp, j = 0;
11056                         __le32 v;
11057                         u16 tmp16;
11058
11059                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11060                                               i);
11061                         while (j++ < 100) {
11062                                 pci_read_config_word(tp->pdev, vpd_cap +
11063                                                      PCI_VPD_ADDR, &tmp16);
11064                                 if (tmp16 & 0x8000)
11065                                         break;
11066                                 msleep(1);
11067                         }
11068                         if (!(tmp16 & 0x8000))
11069                                 goto out_not_found;
11070
11071                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11072                                               &tmp);
11073                         v = cpu_to_le32(tmp);
11074                         memcpy(&vpd_data[i], &v, 4);
11075                 }
11076         }
11077
11078         /* Now parse and find the part number. */
11079         for (i = 0; i < 254; ) {
11080                 unsigned char val = vpd_data[i];
11081                 unsigned int block_end;
11082
11083                 if (val == 0x82 || val == 0x91) {
11084                         i = (i + 3 +
11085                              (vpd_data[i + 1] +
11086                               (vpd_data[i + 2] << 8)));
11087                         continue;
11088                 }
11089
11090                 if (val != 0x90)
11091                         goto out_not_found;
11092
11093                 block_end = (i + 3 +
11094                              (vpd_data[i + 1] +
11095                               (vpd_data[i + 2] << 8)));
11096                 i += 3;
11097
11098                 if (block_end > 256)
11099                         goto out_not_found;
11100
11101                 while (i < (block_end - 2)) {
11102                         if (vpd_data[i + 0] == 'P' &&
11103                             vpd_data[i + 1] == 'N') {
11104                                 int partno_len = vpd_data[i + 2];
11105
11106                                 i += 3;
11107                                 if (partno_len > 24 || (partno_len + i) > 256)
11108                                         goto out_not_found;
11109
11110                                 memcpy(tp->board_part_number,
11111                                        &vpd_data[i], partno_len);
11112
11113                                 /* Success. */
11114                                 return;
11115                         }
11116                         i += 3 + vpd_data[i + 2];
11117                 }
11118
11119                 /* Part number not found. */
11120                 goto out_not_found;
11121         }
11122
11123 out_not_found:
11124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11125                 strcpy(tp->board_part_number, "BCM95906");
11126         else
11127                 strcpy(tp->board_part_number, "none");
11128 }
11129
11130 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11131 {
11132         u32 val;
11133
11134         if (tg3_nvram_read_swab(tp, offset, &val) ||
11135             (val & 0xfc000000) != 0x0c000000 ||
11136             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11137             val != 0)
11138                 return 0;
11139
11140         return 1;
11141 }
11142
/* tg3_read_fw_ver - build the firmware version string in tp->fw_ver.
 *
 * Copies the bootcode version string out of an EEPROM-format NVRAM
 * image and, when ASF management firmware is enabled (and the APE is
 * not), appends the ASF initialization firmware version after ", ".
 * Returns silently on any NVRAM read failure, leaving tp->fw_ver in
 * whatever state it had reached.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Only EEPROM-format images use the layout decoded below. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: bootcode image address; word 0x4: its load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word 8 of a valid image holds the version-string pointer. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the image's load address; convert
	 * it back to an NVRAM offset and copy 16 bytes of version text.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* The ASF version is only appended when ASF is active and the
	 * APE is not managing the device.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF INI firmware entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; later chips store
	 * it in the preceding directory word.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Follow the directory entry to the ASF image, validate it,
	 * and read its version-string pointer (word 8).
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	/* NOTE(review): assumes the bootcode string left room within
	 * TG3_VER_SIZE for ", " plus at least part of the ASF version
	 * -- confirm fw_ver sizing against struct tg3.
	 */
	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes of ASF version text, truncating the
	 * final word if the buffer end is reached.
	 */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how much was copied. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11226
11227 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11228
11229 static int __devinit tg3_get_invariants(struct tg3 *tp)
11230 {
11231         static struct pci_device_id write_reorder_chipsets[] = {
11232                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11233                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11234                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11235                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11236                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11237                              PCI_DEVICE_ID_VIA_8385_0) },
11238                 { },
11239         };
11240         u32 misc_ctrl_reg;
11241         u32 cacheline_sz_reg;
11242         u32 pci_state_reg, grc_misc_cfg;
11243         u32 val;
11244         u16 pci_cmd;
11245         int err, pcie_cap;
11246
11247         /* Force memory write invalidate off.  If we leave it on,
11248          * then on 5700_BX chips we have to enable a workaround.
11249          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11250          * to match the cacheline size.  The Broadcom driver have this
11251          * workaround but turns MWI off all the times so never uses
11252          * it.  This seems to suggest that the workaround is insufficient.
11253          */
11254         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11255         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11256         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11257
11258         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11259          * has the register indirect write enable bit set before
11260          * we try to access any of the MMIO registers.  It is also
11261          * critical that the PCI-X hw workaround situation is decided
11262          * before that as well.
11263          */
11264         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11265                               &misc_ctrl_reg);
11266
11267         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11268                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11269         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11270                 u32 prod_id_asic_rev;
11271
11272                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11273                                       &prod_id_asic_rev);
11274                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11275         }
11276
11277         /* Wrong chip ID in 5752 A0. This code can be removed later
11278          * as A0 is not in production.
11279          */
11280         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11281                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11282
11283         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11284          * we need to disable memory and use config. cycles
11285          * only to access all registers. The 5702/03 chips
11286          * can mistakenly decode the special cycles from the
11287          * ICH chipsets as memory write cycles, causing corruption
11288          * of register and memory space. Only certain ICH bridges
11289          * will drive special cycles with non-zero data during the
11290          * address phase which can fall within the 5703's address
11291          * range. This is not an ICH bug as the PCI spec allows
11292          * non-zero address during special cycles. However, only
11293          * these ICH bridges are known to drive non-zero addresses
11294          * during special cycles.
11295          *
11296          * Since special cycles do not cross PCI bridges, we only
11297          * enable this workaround if the 5703 is on the secondary
11298          * bus of these ICH bridges.
11299          */
11300         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11301             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11302                 static struct tg3_dev_id {
11303                         u32     vendor;
11304                         u32     device;
11305                         u32     rev;
11306                 } ich_chipsets[] = {
11307                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11308                           PCI_ANY_ID },
11309                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11310                           PCI_ANY_ID },
11311                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11312                           0xa },
11313                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11314                           PCI_ANY_ID },
11315                         { },
11316                 };
11317                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11318                 struct pci_dev *bridge = NULL;
11319
11320                 while (pci_id->vendor != 0) {
11321                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11322                                                 bridge);
11323                         if (!bridge) {
11324                                 pci_id++;
11325                                 continue;
11326                         }
11327                         if (pci_id->rev != PCI_ANY_ID) {
11328                                 if (bridge->revision > pci_id->rev)
11329                                         continue;
11330                         }
11331                         if (bridge->subordinate &&
11332                             (bridge->subordinate->number ==
11333                              tp->pdev->bus->number)) {
11334
11335                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11336                                 pci_dev_put(bridge);
11337                                 break;
11338                         }
11339                 }
11340         }
11341
11342         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11343          * DMA addresses > 40-bit. This bridge may have other additional
11344          * 57xx devices behind it in some 4-port NIC designs for example.
11345          * Any tg3 device found behind the bridge will also need the 40-bit
11346          * DMA workaround.
11347          */
11348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11350                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11351                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11352                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11353         }
11354         else {
11355                 struct pci_dev *bridge = NULL;
11356
11357                 do {
11358                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11359                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11360                                                 bridge);
11361                         if (bridge && bridge->subordinate &&
11362                             (bridge->subordinate->number <=
11363                              tp->pdev->bus->number) &&
11364                             (bridge->subordinate->subordinate >=
11365                              tp->pdev->bus->number)) {
11366                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11367                                 pci_dev_put(bridge);
11368                                 break;
11369                         }
11370                 } while (bridge);
11371         }
11372
11373         /* Initialize misc host control in PCI block. */
11374         tp->misc_host_ctrl |= (misc_ctrl_reg &
11375                                MISC_HOST_CTRL_CHIPREV);
11376         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11377                                tp->misc_host_ctrl);
11378
11379         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11380                               &cacheline_sz_reg);
11381
11382         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11383         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11384         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11385         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11386
11387         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11388             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11389                 tp->pdev_peer = tg3_find_peer(tp);
11390
11391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11392             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11393             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11394             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11395             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11396             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11397             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11398             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11399                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11400
11401         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11402             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11403                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11404
11405         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11406                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11407                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11408                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11409                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11410                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11411                      tp->pdev_peer == tp->pdev))
11412                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11413
11414                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11415                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11416                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11417                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11418                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11419                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11420                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11421                 } else {
11422                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11423                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11424                                 ASIC_REV_5750 &&
11425                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11426                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11427                 }
11428         }
11429
11430         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11431             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11432             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11433             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11434             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11435             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11436             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11437             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11438                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11439
11440         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11441         if (pcie_cap != 0) {
11442                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11443
11444                 pcie_set_readrq(tp->pdev, 4096);
11445
11446                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11447                         u16 lnkctl;
11448
11449                         pci_read_config_word(tp->pdev,
11450                                              pcie_cap + PCI_EXP_LNKCTL,
11451                                              &lnkctl);
11452                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11453                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11454                 }
11455         }
11456
11457         /* If we have an AMD 762 or VIA K8T800 chipset, write
11458          * reordering to the mailbox registers done by the host
11459          * controller can cause major troubles.  We read back from
11460          * every mailbox register write to force the writes to be
11461          * posted to the chip in order.
11462          */
11463         if (pci_dev_present(write_reorder_chipsets) &&
11464             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11465                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11466
11467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11468             tp->pci_lat_timer < 64) {
11469                 tp->pci_lat_timer = 64;
11470
11471                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11472                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11473                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11474                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11475
11476                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11477                                        cacheline_sz_reg);
11478         }
11479
11480         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11481             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11482                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11483                 if (!tp->pcix_cap) {
11484                         printk(KERN_ERR PFX "Cannot find PCI-X "
11485                                             "capability, aborting.\n");
11486                         return -EIO;
11487                 }
11488         }
11489
11490         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11491                               &pci_state_reg);
11492
11493         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11494                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11495
11496                 /* If this is a 5700 BX chipset, and we are in PCI-X
11497                  * mode, enable register write workaround.
11498                  *
11499                  * The workaround is to use indirect register accesses
11500                  * for all chip writes not to mailbox registers.
11501                  */
11502                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11503                         u32 pm_reg;
11504
11505                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11506
11507                         /* The chip can have it's power management PCI config
11508                          * space registers clobbered due to this bug.
11509                          * So explicitly force the chip into D0 here.
11510                          */
11511                         pci_read_config_dword(tp->pdev,
11512                                               tp->pm_cap + PCI_PM_CTRL,
11513                                               &pm_reg);
11514                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11515                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11516                         pci_write_config_dword(tp->pdev,
11517                                                tp->pm_cap + PCI_PM_CTRL,
11518                                                pm_reg);
11519
11520                         /* Also, force SERR#/PERR# in PCI command. */
11521                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11522                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11523                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11524                 }
11525         }
11526
11527         /* 5700 BX chips need to have their TX producer index mailboxes
11528          * written twice to workaround a bug.
11529          */
11530         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11531                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11532
11533         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11534                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11535         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11536                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11537
11538         /* Chip-specific fixup from Broadcom driver */
11539         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11540             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11541                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11542                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11543         }
11544
11545         /* Default fast path register access methods */
11546         tp->read32 = tg3_read32;
11547         tp->write32 = tg3_write32;
11548         tp->read32_mbox = tg3_read32;
11549         tp->write32_mbox = tg3_write32;
11550         tp->write32_tx_mbox = tg3_write32;
11551         tp->write32_rx_mbox = tg3_write32;
11552
11553         /* Various workaround register access methods */
11554         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11555                 tp->write32 = tg3_write_indirect_reg32;
11556         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11557                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11558                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11559                 /*
11560                  * Back to back register writes can cause problems on these
11561                  * chips, the workaround is to read back all reg writes
11562                  * except those to mailbox regs.
11563                  *
11564                  * See tg3_write_indirect_reg32().
11565                  */
11566                 tp->write32 = tg3_write_flush_reg32;
11567         }
11568
11569
11570         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11571             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11572                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11573                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11574                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11575         }
11576
11577         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11578                 tp->read32 = tg3_read_indirect_reg32;
11579                 tp->write32 = tg3_write_indirect_reg32;
11580                 tp->read32_mbox = tg3_read_indirect_mbox;
11581                 tp->write32_mbox = tg3_write_indirect_mbox;
11582                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11583                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11584
11585                 iounmap(tp->regs);
11586                 tp->regs = NULL;
11587
11588                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11589                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11590                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11591         }
11592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11593                 tp->read32_mbox = tg3_read32_mbox_5906;
11594                 tp->write32_mbox = tg3_write32_mbox_5906;
11595                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11596                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11597         }
11598
11599         if (tp->write32 == tg3_write_indirect_reg32 ||
11600             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11601              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11602               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11603                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11604
11605         /* Get eeprom hw config before calling tg3_set_power_state().
11606          * In particular, the TG3_FLG2_IS_NIC flag must be
11607          * determined before calling tg3_set_power_state() so that
11608          * we know whether or not to switch out of Vaux power.
11609          * When the flag is set, it means that GPIO1 is used for eeprom
11610          * write protect and also implies that it is a LOM where GPIOs
11611          * are not used to switch power.
11612          */
11613         tg3_get_eeprom_hw_cfg(tp);
11614
11615         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11616                 /* Allow reads and writes to the
11617                  * APE register and memory space.
11618                  */
11619                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11620                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11621                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11622                                        pci_state_reg);
11623         }
11624
11625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11626             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11627                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11628
11629                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11630                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11631                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11632                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11633                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11634         }
11635
11636         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11637          * GPIO1 driven high will bring 5700's external PHY out of reset.
11638          * It is also used as eeprom write protect on LOMs.
11639          */
11640         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11641         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11642             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11643                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11644                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11645         /* Unused GPIO3 must be driven as output on 5752 because there
11646          * are no pull-up resistors on unused GPIO pins.
11647          */
11648         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11649                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11650
11651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11652                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11653
11654         /* Force the chip into D0. */
11655         err = tg3_set_power_state(tp, PCI_D0);
11656         if (err) {
11657                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11658                        pci_name(tp->pdev));
11659                 return err;
11660         }
11661
11662         /* 5700 B0 chips do not support checksumming correctly due
11663          * to hardware bugs.
11664          */
11665         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11666                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11667
11668         /* Derive initial jumbo mode from MTU assigned in
11669          * ether_setup() via the alloc_etherdev() call
11670          */
11671         if (tp->dev->mtu > ETH_DATA_LEN &&
11672             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11673                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11674
11675         /* Determine WakeOnLan speed to use. */
11676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11677             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11678             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11679             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11680                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11681         } else {
11682                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11683         }
11684
11685         /* A few boards don't want Ethernet@WireSpeed phy feature */
11686         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11687             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11688              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11689              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11690             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11691             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11692                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11693
11694         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11695             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11696                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11697         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11698                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11699
11700         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11701                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11702                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11703                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11704                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11705                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11706                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11707                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11708                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11709                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11710                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11711                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11712         }
11713
11714         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11715             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11716                 tp->phy_otp = tg3_read_otp_phycfg(tp);
11717                 if (tp->phy_otp == 0)
11718                         tp->phy_otp = TG3_OTP_DEFAULT;
11719         }
11720
11721         tp->coalesce_mode = 0;
11722         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11723             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11724                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11725
11726         /* Initialize MAC MI mode, polling disabled. */
11727         tw32_f(MAC_MI_MODE, tp->mi_mode);
11728         udelay(80);
11729
11730         /* Initialize data/descriptor byte/word swapping. */
11731         val = tr32(GRC_MODE);
11732         val &= GRC_MODE_HOST_STACKUP;
11733         tw32(GRC_MODE, val | tp->grc_mode);
11734
11735         tg3_switch_clocks(tp);
11736
11737         /* Clear this out for sanity. */
11738         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11739
11740         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11741                               &pci_state_reg);
11742         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11743             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11744                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11745
11746                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11747                     chiprevid == CHIPREV_ID_5701_B0 ||
11748                     chiprevid == CHIPREV_ID_5701_B2 ||
11749                     chiprevid == CHIPREV_ID_5701_B5) {
11750                         void __iomem *sram_base;
11751
11752                         /* Write some dummy words into the SRAM status block
11753                          * area, see if it reads back correctly.  If the return
11754                          * value is bad, force enable the PCIX workaround.
11755                          */
11756                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11757
11758                         writel(0x00000000, sram_base);
11759                         writel(0x00000000, sram_base + 4);
11760                         writel(0xffffffff, sram_base + 4);
11761                         if (readl(sram_base) != 0x00000000)
11762                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11763                 }
11764         }
11765
11766         udelay(50);
11767         tg3_nvram_init(tp);
11768
11769         grc_misc_cfg = tr32(GRC_MISC_CFG);
11770         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11771
11772         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11773             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11774              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11775                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11776
11777         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11778             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11779                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11780         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11781                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11782                                       HOSTCC_MODE_CLRTICK_TXBD);
11783
11784                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11785                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11786                                        tp->misc_host_ctrl);
11787         }
11788
11789         /* these are limited to 10/100 only */
11790         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11791              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11792             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11793              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11794              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11795               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11796               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11797             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11798              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11799               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11800               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11801             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11802                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11803
11804         err = tg3_phy_probe(tp);
11805         if (err) {
11806                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11807                        pci_name(tp->pdev), err);
11808                 /* ... but do not return immediately ... */
11809         }
11810
11811         tg3_read_partno(tp);
11812         tg3_read_fw_ver(tp);
11813
11814         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11815                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11816         } else {
11817                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11818                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11819                 else
11820                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11821         }
11822
11823         /* 5700 {AX,BX} chips have a broken status block link
11824          * change bit implementation, so we must use the
11825          * status register in those cases.
11826          */
11827         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11828                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11829         else
11830                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11831
11832         /* The led_ctrl is set during tg3_phy_probe, here we might
11833          * have to force the link status polling mechanism based
11834          * upon subsystem IDs.
11835          */
11836         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11837             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11838             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11839                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11840                                   TG3_FLAG_USE_LINKCHG_REG);
11841         }
11842
11843         /* For all SERDES we poll the MAC status register. */
11844         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11845                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11846         else
11847                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11848
11849         /* All chips before 5787 can get confused if TX buffers
11850          * straddle the 4GB address boundary in some cases.
11851          */
11852         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11853             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11854             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11855             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11856             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11857                 tp->dev->hard_start_xmit = tg3_start_xmit;
11858         else
11859                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11860
11861         tp->rx_offset = 2;
11862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11863             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11864                 tp->rx_offset = 0;
11865
11866         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11867
11868         /* Increment the rx prod index on the rx std ring by at most
11869          * 8 for these chips to workaround hw errata.
11870          */
11871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11872             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11873             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11874                 tp->rx_std_max_post = 8;
11875
11876         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11877                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11878                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11879
11880         return err;
11881 }
11882
11883 #ifdef CONFIG_SPARC
11884 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11885 {
11886         struct net_device *dev = tp->dev;
11887         struct pci_dev *pdev = tp->pdev;
11888         struct device_node *dp = pci_device_to_OF_node(pdev);
11889         const unsigned char *addr;
11890         int len;
11891
11892         addr = of_get_property(dp, "local-mac-address", &len);
11893         if (addr && len == 6) {
11894                 memcpy(dev->dev_addr, addr, 6);
11895                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11896                 return 0;
11897         }
11898         return -ENODEV;
11899 }
11900
11901 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11902 {
11903         struct net_device *dev = tp->dev;
11904
11905         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11906         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11907         return 0;
11908 }
11909 #endif
11910
/* Determine the device MAC address, trying sources in decreasing order of
 * preference: SPARC OF property, SRAM MAC-address mailbox, NVRAM, and
 * finally the MAC address registers themselves.
 *
 * Returns 0 on success (dev->dev_addr and dev->perm_addr populated),
 * -EINVAL if no valid address could be found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	/* Firmware-provided address wins outright on SPARC. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts: the second function's address lives at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): lock failure is answered with an NVRAM
		 * command reset rather than an error return — presumably to
		 * recover a wedged NVRAM arbiter; confirm against Broadcom
		 * documentation before touching this sequence.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — presumably a bootcode signature marking a
	 * valid mailbox entry.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		/* Note the NVRAM byte order differs from the mailbox order. */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* On SPARC, fall back to the system IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
11985
11986 #define BOUNDARY_SINGLE_CACHELINE       1
11987 #define BOUNDARY_MULTI_CACHELINE        2
11988
11989 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11990 {
11991         int cacheline_size;
11992         u8 byte;
11993         int goal;
11994
11995         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11996         if (byte == 0)
11997                 cacheline_size = 1024;
11998         else
11999                 cacheline_size = (int) byte * 4;
12000
12001         /* On 5703 and later chips, the boundary bits have no
12002          * effect.
12003          */
12004         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12005             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12006             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12007                 goto out;
12008
12009 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12010         goal = BOUNDARY_MULTI_CACHELINE;
12011 #else
12012 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12013         goal = BOUNDARY_SINGLE_CACHELINE;
12014 #else
12015         goal = 0;
12016 #endif
12017 #endif
12018
12019         if (!goal)
12020                 goto out;
12021
12022         /* PCI controllers on most RISC systems tend to disconnect
12023          * when a device tries to burst across a cache-line boundary.
12024          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12025          *
12026          * Unfortunately, for PCI-E there are only limited
12027          * write-side controls for this, and thus for reads
12028          * we will still get the disconnects.  We'll also waste
12029          * these PCI cycles for both read and write for chips
12030          * other than 5700 and 5701 which do not implement the
12031          * boundary bits.
12032          */
12033         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12034             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12035                 switch (cacheline_size) {
12036                 case 16:
12037                 case 32:
12038                 case 64:
12039                 case 128:
12040                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12041                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12042                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12043                         } else {
12044                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12045                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12046                         }
12047                         break;
12048
12049                 case 256:
12050                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12051                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12052                         break;
12053
12054                 default:
12055                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12056                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12057                         break;
12058                 };
12059         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12060                 switch (cacheline_size) {
12061                 case 16:
12062                 case 32:
12063                 case 64:
12064                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12065                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12066                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12067                                 break;
12068                         }
12069                         /* fallthrough */
12070                 case 128:
12071                 default:
12072                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12073                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12074                         break;
12075                 };
12076         } else {
12077                 switch (cacheline_size) {
12078                 case 16:
12079                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12080                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12081                                         DMA_RWCTRL_WRITE_BNDRY_16);
12082                                 break;
12083                         }
12084                         /* fallthrough */
12085                 case 32:
12086                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12087                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12088                                         DMA_RWCTRL_WRITE_BNDRY_32);
12089                                 break;
12090                         }
12091                         /* fallthrough */
12092                 case 64:
12093                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12094                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12095                                         DMA_RWCTRL_WRITE_BNDRY_64);
12096                                 break;
12097                         }
12098                         /* fallthrough */
12099                 case 128:
12100                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12101                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12102                                         DMA_RWCTRL_WRITE_BNDRY_128);
12103                                 break;
12104                         }
12105                         /* fallthrough */
12106                 case 256:
12107                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12108                                 DMA_RWCTRL_WRITE_BNDRY_256);
12109                         break;
12110                 case 512:
12111                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12112                                 DMA_RWCTRL_WRITE_BNDRY_512);
12113                         break;
12114                 case 1024:
12115                 default:
12116                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12117                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12118                         break;
12119                 };
12120         }
12121
12122 out:
12123         return val;
12124 }
12125
/* Run one direction of the internal DMA engine self-test.
 *
 * Builds a single internal buffer descriptor covering the host buffer at
 * @buf_dma (@size bytes), hands it to the read-DMA engine (@to_device != 0,
 * host memory -> NIC SRAM at 0x2100) or the write-DMA engine (@to_device
 * == 0, NIC SRAM -> host memory), then polls the completion FIFO.
 *
 * Returns 0 if the descriptor shows up on the completion queue within
 * ~4ms of polling, -ENODEV on timeout.  The register write ordering here
 * is deliberate; do not reorder.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce both DMA engines: drain completion FIFOs, clear status. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer; 0x00002100 is the NIC SRAM mbuf
	 * address used as the on-chip end of the transfer (presumably a
	 * scratch area — see the validation read-back in tg3_test_dma).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid/sqid values select the completion/submission queues
		 * for the read-DMA engine — NOTE(review): magic pairs taken
		 * as-is; confirm against Broadcom FTQ documentation.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word-by-word through the
	 * PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	/* Close the memory window again. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for our descriptor:
	 * 40 iterations x 100us = ~4ms timeout.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12206
12207 #define TEST_BUFFER_SIZE        0x2000
12208
12209 static int __devinit tg3_test_dma(struct tg3 *tp)
12210 {
12211         dma_addr_t buf_dma;
12212         u32 *buf, saved_dma_rwctrl;
12213         int ret;
12214
12215         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12216         if (!buf) {
12217                 ret = -ENOMEM;
12218                 goto out_nofree;
12219         }
12220
12221         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12222                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12223
12224         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12225
12226         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12227                 /* DMA read watermark not used on PCIE */
12228                 tp->dma_rwctrl |= 0x00180000;
12229         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12230                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12231                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12232                         tp->dma_rwctrl |= 0x003f0000;
12233                 else
12234                         tp->dma_rwctrl |= 0x003f000f;
12235         } else {
12236                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12237                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12238                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12239                         u32 read_water = 0x7;
12240
12241                         /* If the 5704 is behind the EPB bridge, we can
12242                          * do the less restrictive ONE_DMA workaround for
12243                          * better performance.
12244                          */
12245                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12246                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12247                                 tp->dma_rwctrl |= 0x8000;
12248                         else if (ccval == 0x6 || ccval == 0x7)
12249                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12250
12251                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12252                                 read_water = 4;
12253                         /* Set bit 23 to enable PCIX hw bug fix */
12254                         tp->dma_rwctrl |=
12255                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12256                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12257                                 (1 << 23);
12258                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12259                         /* 5780 always in PCIX mode */
12260                         tp->dma_rwctrl |= 0x00144000;
12261                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12262                         /* 5714 always in PCIX mode */
12263                         tp->dma_rwctrl |= 0x00148000;
12264                 } else {
12265                         tp->dma_rwctrl |= 0x001b000f;
12266                 }
12267         }
12268
12269         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12270             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12271                 tp->dma_rwctrl &= 0xfffffff0;
12272
12273         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12274             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12275                 /* Remove this if it causes problems for some boards. */
12276                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12277
12278                 /* On 5700/5701 chips, we need to set this bit.
12279                  * Otherwise the chip will issue cacheline transactions
12280                  * to streamable DMA memory with not all the byte
12281                  * enables turned on.  This is an error on several
12282                  * RISC PCI controllers, in particular sparc64.
12283                  *
12284                  * On 5703/5704 chips, this bit has been reassigned
12285                  * a different meaning.  In particular, it is used
12286                  * on those chips to enable a PCI-X workaround.
12287                  */
12288                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12289         }
12290
12291         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12292
12293 #if 0
12294         /* Unneeded, already done by tg3_get_invariants.  */
12295         tg3_switch_clocks(tp);
12296 #endif
12297
12298         ret = 0;
12299         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12300             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12301                 goto out;
12302
12303         /* It is best to perform DMA test with maximum write burst size
12304          * to expose the 5700/5701 write DMA bug.
12305          */
12306         saved_dma_rwctrl = tp->dma_rwctrl;
12307         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12308         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12309
12310         while (1) {
12311                 u32 *p = buf, i;
12312
12313                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12314                         p[i] = i;
12315
12316                 /* Send the buffer to the chip. */
12317                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12318                 if (ret) {
12319                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12320                         break;
12321                 }
12322
12323 #if 0
12324                 /* validate data reached card RAM correctly. */
12325                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12326                         u32 val;
12327                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12328                         if (le32_to_cpu(val) != p[i]) {
12329                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12330                                 /* ret = -ENODEV here? */
12331                         }
12332                         p[i] = 0;
12333                 }
12334 #endif
12335                 /* Now read it back. */
12336                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12337                 if (ret) {
12338                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12339
12340                         break;
12341                 }
12342
12343                 /* Verify it. */
12344                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12345                         if (p[i] == i)
12346                                 continue;
12347
12348                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12349                             DMA_RWCTRL_WRITE_BNDRY_16) {
12350                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12351                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12352                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12353                                 break;
12354                         } else {
12355                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12356                                 ret = -ENODEV;
12357                                 goto out;
12358                         }
12359                 }
12360
12361                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12362                         /* Success. */
12363                         ret = 0;
12364                         break;
12365                 }
12366         }
12367         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12368             DMA_RWCTRL_WRITE_BNDRY_16) {
12369                 static struct pci_device_id dma_wait_state_chipsets[] = {
12370                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12371                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12372                         { },
12373                 };
12374
12375                 /* DMA test passed without adjusting DMA boundary,
12376                  * now look for chipsets that are known to expose the
12377                  * DMA bug without failing the test.
12378                  */
12379                 if (pci_dev_present(dma_wait_state_chipsets)) {
12380                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12381                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12382                 }
12383                 else
12384                         /* Safe to use the calculated DMA boundary. */
12385                         tp->dma_rwctrl = saved_dma_rwctrl;
12386
12387                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12388         }
12389
12390 out:
12391         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12392 out_nofree:
12393         return ret;
12394 }
12395
12396 static void __devinit tg3_init_link_config(struct tg3 *tp)
12397 {
12398         tp->link_config.advertising =
12399                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12400                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12401                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12402                  ADVERTISED_Autoneg | ADVERTISED_MII);
12403         tp->link_config.speed = SPEED_INVALID;
12404         tp->link_config.duplex = DUPLEX_INVALID;
12405         tp->link_config.autoneg = AUTONEG_ENABLE;
12406         tp->link_config.active_speed = SPEED_INVALID;
12407         tp->link_config.active_duplex = DUPLEX_INVALID;
12408         tp->link_config.phy_is_low_power = 0;
12409         tp->link_config.orig_speed = SPEED_INVALID;
12410         tp->link_config.orig_duplex = DUPLEX_INVALID;
12411         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12412 }
12413
12414 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12415 {
12416         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12417                 tp->bufmgr_config.mbuf_read_dma_low_water =
12418                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12419                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12420                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12421                 tp->bufmgr_config.mbuf_high_water =
12422                         DEFAULT_MB_HIGH_WATER_5705;
12423                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12424                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12425                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12426                         tp->bufmgr_config.mbuf_high_water =
12427                                 DEFAULT_MB_HIGH_WATER_5906;
12428                 }
12429
12430                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12431                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12432                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12433                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12434                 tp->bufmgr_config.mbuf_high_water_jumbo =
12435                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12436         } else {
12437                 tp->bufmgr_config.mbuf_read_dma_low_water =
12438                         DEFAULT_MB_RDMA_LOW_WATER;
12439                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12440                         DEFAULT_MB_MACRX_LOW_WATER;
12441                 tp->bufmgr_config.mbuf_high_water =
12442                         DEFAULT_MB_HIGH_WATER;
12443
12444                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12445                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12446                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12447                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12448                 tp->bufmgr_config.mbuf_high_water_jumbo =
12449                         DEFAULT_MB_HIGH_WATER_JUMBO;
12450         }
12451
12452         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12453         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12454 }
12455
12456 static char * __devinit tg3_phy_string(struct tg3 *tp)
12457 {
12458         switch (tp->phy_id & PHY_ID_MASK) {
12459         case PHY_ID_BCM5400:    return "5400";
12460         case PHY_ID_BCM5401:    return "5401";
12461         case PHY_ID_BCM5411:    return "5411";
12462         case PHY_ID_BCM5701:    return "5701";
12463         case PHY_ID_BCM5703:    return "5703";
12464         case PHY_ID_BCM5704:    return "5704";
12465         case PHY_ID_BCM5705:    return "5705";
12466         case PHY_ID_BCM5750:    return "5750";
12467         case PHY_ID_BCM5752:    return "5752";
12468         case PHY_ID_BCM5714:    return "5714";
12469         case PHY_ID_BCM5780:    return "5780";
12470         case PHY_ID_BCM5755:    return "5755";
12471         case PHY_ID_BCM5787:    return "5787";
12472         case PHY_ID_BCM5784:    return "5784";
12473         case PHY_ID_BCM5756:    return "5722/5756";
12474         case PHY_ID_BCM5906:    return "5906";
12475         case PHY_ID_BCM5761:    return "5761";
12476         case PHY_ID_BCM8002:    return "8002/serdes";
12477         case 0:                 return "serdes";
12478         default:                return "unknown";
12479         };
12480 }
12481
12482 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12483 {
12484         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12485                 strcpy(str, "PCI Express");
12486                 return str;
12487         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12488                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12489
12490                 strcpy(str, "PCIX:");
12491
12492                 if ((clock_ctrl == 7) ||
12493                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12494                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12495                         strcat(str, "133MHz");
12496                 else if (clock_ctrl == 0)
12497                         strcat(str, "33MHz");
12498                 else if (clock_ctrl == 2)
12499                         strcat(str, "50MHz");
12500                 else if (clock_ctrl == 4)
12501                         strcat(str, "66MHz");
12502                 else if (clock_ctrl == 6)
12503                         strcat(str, "100MHz");
12504         } else {
12505                 strcpy(str, "PCI:");
12506                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12507                         strcat(str, "66MHz");
12508                 else
12509                         strcat(str, "33MHz");
12510         }
12511         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12512                 strcat(str, ":32-bit");
12513         else
12514                 strcat(str, ":64-bit");
12515         return str;
12516 }
12517
12518 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12519 {
12520         struct pci_dev *peer;
12521         unsigned int func, devnr = tp->pdev->devfn & ~7;
12522
12523         for (func = 0; func < 8; func++) {
12524                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12525                 if (peer && peer != tp->pdev)
12526                         break;
12527                 pci_dev_put(peer);
12528         }
12529         /* 5704 can be configured in single-port mode, set peer to
12530          * tp->pdev in that case.
12531          */
12532         if (!peer) {
12533                 peer = tp->pdev;
12534                 return peer;
12535         }
12536
12537         /*
12538          * We don't need to keep the refcount elevated; there's no way
12539          * to remove one half of this device without removing the other
12540          */
12541         pci_dev_put(peer);
12542
12543         return peer;
12544 }
12545
12546 static void __devinit tg3_init_coal(struct tg3 *tp)
12547 {
12548         struct ethtool_coalesce *ec = &tp->coal;
12549
12550         memset(ec, 0, sizeof(*ec));
12551         ec->cmd = ETHTOOL_GCOALESCE;
12552         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12553         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12554         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12555         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12556         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12557         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12558         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12559         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12560         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12561
12562         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12563                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12564                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12565                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12566                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12567                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12568         }
12569
12570         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12571                 ec->rx_coalesce_usecs_irq = 0;
12572                 ec->tx_coalesce_usecs_irq = 0;
12573                 ec->stats_block_coalesce_usecs = 0;
12574         }
12575 }
12576
/* PCI probe callback: bring up one Tigon3 device.
 *
 * Allocates the net_device, maps BAR 0 (and the APE BAR when present),
 * discovers chip capabilities, configures the DMA masks, validates the
 * DMA engine and finally registers the netdev.  Anything acquired before
 * a failure is released through the goto-cleanup ladder at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner once, on the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the private state embedded in the netdev. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register window (BAR 0). */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the netdev operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Discover chip revision, flags and quirks from config space. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO when available, firmware
	 * TSO otherwise, except on chip revisions where it is broken.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO: cap the RX ring depth. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE (Application Processing Engine) registers from
	 * BAR 2 when the chip has one.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	/* Exercise the DMA engine; this may also tune tp->dma_rwctrl. */
	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Announce the device and its key capabilities. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwinding: each label releases everything acquired
	 * after the previous label's resources.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12919
12920 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12921 {
12922         struct net_device *dev = pci_get_drvdata(pdev);
12923
12924         if (dev) {
12925                 struct tg3 *tp = netdev_priv(dev);
12926
12927                 flush_scheduled_work();
12928                 unregister_netdev(dev);
12929                 if (tp->aperegs) {
12930                         iounmap(tp->aperegs);
12931                         tp->aperegs = NULL;
12932                 }
12933                 if (tp->regs) {
12934                         iounmap(tp->regs);
12935                         tp->regs = NULL;
12936                 }
12937                 free_netdev(dev);
12938                 pci_release_regions(pdev);
12939                 pci_disable_device(pdev);
12940                 pci_set_drvdata(pdev, NULL);
12941         }
12942 }
12943
/* PCI suspend callback: quiesce the device and enter the requested
 * low-power state.  If the power transition fails, the hardware is
 * restarted so the interface is left in a working state.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	/* Nothing else to quiesce when the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Let any queued reset task finish before stopping the device. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark initialization as no longer complete. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: restart the hardware and
		 * reattach the interface so it keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12995
/* PCI resume callback: restore config space, return the chip to D0 and,
 * when the interface was running at suspend time, restart the hardware,
 * the periodic timer, and the net queues.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	/* Interface was down before suspend: nothing more to restart. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped during suspend. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
13030
/* PCI driver glue: matches tg3_pci_tbl and dispatches probe/remove
 * and power-management events to the handlers above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13039
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
13044
/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
13049
/* Wire the entry/exit points into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);