Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394...
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.71"
72 #define DRV_MODULE_RELDATE      "December 15, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
/* Banner string printed once when the module loads. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* debug categories; settable at module load time. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
205         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
211         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
212         {}
213 };
214
215 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
/* Names reported to ethtool for each of the TG3_NUM_STATS u64 counters;
 * the order here must match struct tg3_ethtool_stats.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
297
/* Names of the TG3_NUM_TEST ethtool self-tests, in execution order;
 * "(online)" tests run without taking the link down.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
308
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310 {
311         writel(val, tp->regs + off);
312 }
313
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
315 {
316         return (readl(tp->regs + off));
317 }
318
/* Write @val to register @off indirectly through PCI config space
 * (address register, then data register).  indirect_lock serializes
 * the two-step sequence against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
328
/* MMIO register write followed by a read-back of the same register,
 * which flushes the posted write out to the chip before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
334
/* Read register @off indirectly through PCI config space; counterpart
 * of tg3_write_indirect_reg32().  indirect_lock protects the shared
 * address/data window.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
346
/* Write mailbox @off via PCI config space.  The receive-return and
 * standard-ring producer mailboxes have dedicated config-space shadow
 * registers; all other mailboxes go through the indirect window at
 * register offset 0x5600 + @off.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
376
/* Read mailbox @off through the PCI config-space indirect window
 * (mailboxes are mapped at register offset 0x5600 + @off).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
388
389 /* usec_wait specifies the wait time in usec when writing to certain registers
390  * where it is unsafe to read back the register without some delay.
391  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
392  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
393  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                /* Read back to flush the posted write to the chip. */
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
413
/* Mailbox write with a conditional read-back flush: the flush is
 * skipped on chipsets that reorder mailbox writes or when the ICH
 * workaround is active.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
421
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; systems that reorder mailbox writes need a
 * read-back to flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
431
432 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
433 {
434         return (readl(tp->regs + off + GRCMBOX_BASE));
435 }
436
437 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
438 {
439         writel(val, tp->regs + off + GRCMBOX_BASE);
440 }
441
442 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
443 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
444 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
445 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
446 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
447
448 #define tw32(reg,val)           tp->write32(tp, reg, val)
449 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
450 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
451 #define tr32(reg)               tp->read32(tp, reg)
452
/* Write @val into NIC SRAM at offset @off via the memory window.
 * On the 5906, writes into the statistics-block range are silently
 * dropped.  The window base register is shared state, hence
 * indirect_lock; it is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
477
/* Read NIC SRAM at offset @off into *@val via the memory window.
 * On the 5906, reads from the statistics-block range return 0.
 * Mirrors tg3_write_mem(): window base is restored to zero under
 * indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
504
/* Disable chip interrupts: mask PCI interrupts in MISC_HOST_CTRL and
 * write 1 to the interrupt mailbox (a non-zero mailbox value holds
 * interrupts off; see the 0x1 special case in tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
511
/* If an un-serviced status update is pending (non-tagged mode only),
 * force an interrupt via GRC local control; otherwise kick the
 * host-coalescing engine so a fresh status block is generated now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
521
/* Re-enable chip interrupts.  irq_sync is cleared before unmasking;
 * the wmb() orders that store ahead of the MMIO writes.  Writing
 * last_tag << 24 to the interrupt mailbox acknowledges completed work;
 * 1-shot MSI chips need the mailbox written twice.  Finally
 * tg3_cond_int() fires an interrupt if work was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
536
537 static inline unsigned int tg3_has_work(struct tg3 *tp)
538 {
539         struct tg3_hw_status *sblk = tp->hw_status;
540         unsigned int work_exists = 0;
541
542         /* check for phy events */
543         if (!(tp->tg3_flags &
544               (TG3_FLAG_USE_LINKCHG_REG |
545                TG3_FLAG_POLL_SERDES))) {
546                 if (sblk->status & SD_STATUS_LINK_CHG)
547                         work_exists = 1;
548         }
549         /* check for RX/TX work to do */
550         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
551             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
552                 work_exists = 1;
553
554         return work_exists;
555 }
556
557 /* tg3_restart_ints
558  *  similar to tg3_enable_ints, but it accurately determines whether there
559  *  is new work pending and can return without flushing the PIO write
560  *  which reenables interrupts
561  */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Acknowledge work up to last_tag and re-arm the interrupt. */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write ahead of later MMIO on weakly
         * ordered platforms.
         */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
577
/* Quiesce the data path: refresh trans_start so the stall we are about
 * to cause is not mistaken for a TX timeout, stop NAPI polling, then
 * stop the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}
584
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable polling, mark the status block as updated so the first
 * poll services anything that arrived meanwhile, and unmask interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
596
/* Step the core clock back to its normal source, preserving the
 * CLKRUN-related bits and the low 5 control bits.  No-op on 5780-class
 * chips.  tw32_wait_f() is used because clock-frequency changes need a
 * settling delay before the register may be read back (see the comment
 * above _tw32_flush()).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Two-step transition: 44MHz + ALTCLK first, then ALTCLK
                 * alone, before falling through to the final value.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
627
628 #define PHY_BUSY_LOOPS  5000
629
/* Read PHY register @reg over the MII management interface.
 *
 * Auto-polling (if active) is paused around the access, a read frame
 * is launched via MAC_MI_COM, and completion is busy-waited for up to
 * PHY_BUSY_LOOPS iterations.
 *
 * Returns 0 with the result stored in *@val, or -EBUSY on timeout
 * (*@val is left as 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle once BUSY clears. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
678
/* Write @val to PHY register @reg over the MII management interface.
 *
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently discarded (returns 0 without touching the hardware).
 * Otherwise mirrors tg3_readphy(): pause auto-polling, launch a write
 * frame, busy-wait for completion.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
727
/* Enable the PHY's "ethernet@wirespeed" feature via the AUX control
 * register, unless the board flags it as unsupported.  The 0x7007
 * write selects the relevant shadow register; bits 15 and 4 are then
 * OR-ed in via read-modify-write (NOTE(review): bit meanings are from
 * Broadcom's PHY shadow-register layout — not verifiable from here).
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
740
741 static int tg3_bmcr_reset(struct tg3 *tp)
742 {
743         u32 phy_control;
744         int limit, err;
745
746         /* OK, reset it, and poll the BMCR_RESET bit until it
747          * clears or we time out.
748          */
749         phy_control = BMCR_RESET;
750         err = tg3_writephy(tp, MII_BMCR, phy_control);
751         if (err != 0)
752                 return -EBUSY;
753
754         limit = 5000;
755         while (limit--) {
756                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
757                 if (err != 0)
758                         return -EBUSY;
759
760                 if ((phy_control & BMCR_RESET) == 0) {
761                         udelay(40);
762                         break;
763                 }
764                 udelay(10);
765         }
766         if (limit <= 0)
767                 return -EBUSY;
768
769         return 0;
770 }
771
772 static int tg3_wait_macro_done(struct tg3 *tp)
773 {
774         int limit = 100;
775
776         while (limit--) {
777                 u32 tmp32;
778
779                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
780                         if ((tmp32 & 0x1000) == 0)
781                                 break;
782                 }
783         }
784         if (limit <= 0)
785                 return -EBUSY;
786
787         return 0;
788 }
789
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On any mismatch or macro timeout,
 * *@resetp is set (for mismatch a recovery sequence is written first)
 * and -EBUSY is returned; 0 on full success.
 *
 * NOTE(review): the 0x16 control values (0x0002/0x0202/0x0082/0x0802)
 * and the 0x000b/0x4001/0x4005 recovery writes come from Broadcom's
 * PHY DSP programming sequence and cannot be verified from this file.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's DSP address, then load the
                 * six pattern words.
                 */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel and switch to read-back mode. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Patterns come back as low/high word pairs; compare
                 * against what we wrote (masked to the valid bits).
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: write the recovery values
                                 * before bailing out.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
855
856 static int tg3_phy_reset_chanpat(struct tg3 *tp)
857 {
858         int chan;
859
860         for (chan = 0; chan < 4; chan++) {
861                 int i;
862
863                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
864                              (chan * 0x2000) | 0x0200);
865                 tg3_writephy(tp, 0x16, 0x0002);
866                 for (i = 0; i < 6; i++)
867                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
868                 tg3_writephy(tp, 0x16, 0x0202);
869                 if (tg3_wait_macro_done(tp))
870                         return -EBUSY;
871         }
872
873         return 0;
874 }
875
/* PHY reset workaround for 5703/5704/5705 chips: reset the PHY, force
 * 1000/full master mode, and verify the DSP with test patterns,
 * retrying (with another PHY reset when requested) until the patterns
 * verify or the retry budget is exhausted.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if every iteration bailed out via "continue",
	 * phy9_orig below is used uninitialized, and a failing err from
	 * the loop is silently overwritten here — pre-existing behavior,
	 * left unchanged.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Clear the DSP scratch register and deselect the DSP block. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (undo the 0x3000 bits). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
951
952 static void tg3_link_report(struct tg3 *);
953
/* Reset the tigon3 PHY and apply all chip-specific PHY bug
 * workarounds.  Drops the carrier (with a link report) if the device
 * was up, then re-applies DSP fixups, jumbo-frame bits, and wirespeed.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 internal ephy out of IDDQ (low-power)
		 * mode before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice: the link-status bit is latched, so the
	 * first read returns the stale latched value.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Announce link-down before resetting the PHY. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Chip-specific DSP fixups, selected by errata flags. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* The double write is intentional (5704 A0 errata). */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 phy_reg;

		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
			u32 phy_reg2;

			/* Open the shadow register window, flip the
			 * auto-MDIX bit, then restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     phy_reg | MII_TG3_EPHY_SHADOW_EN);
			/* Enable auto-MDIX */
			if (!tg3_readphy(tp, 0x10, &phy_reg2))
				tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
			tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
		}
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1067
/* Configure the GPIO-controlled auxiliary power source.  On dual-port
 * devices (5704/5714) the GPIOs are shared with the peer function, so
 * the peer's WOL/ASF flags and init state are consulted too.  The
 * tw32_wait_f() ordering below is deliberate — do not reorder.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only real NIC configurations have the aux-power GPIOs wired. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Aux power is needed: WOL or ASF is active on either port. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* GPIOs are shared — defer to the peer if it has
			 * already completed initialization.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three staged writes: enable outputs, raise
			 * OUTPUT0, then (if usable) drop OUTPUT2.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Aux power not needed — presumably this sequence switches
		 * back to main power (NOTE(review): confirm against the
		 * Broadcom GPIO wiring docs).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1163
1164 static int tg3_setup_phy(struct tg3 *, int);
1165
1166 #define RESET_KIND_SHUTDOWN     0
1167 #define RESET_KIND_INIT         1
1168 #define RESET_KIND_SUSPEND      2
1169
1170 static void tg3_write_sig_post_reset(struct tg3 *, int);
1171 static int tg3_halt_cpu(struct tg3 *, u32);
1172 static int tg3_nvram_lock(struct tg3 *);
1173 static void tg3_nvram_unlock(struct tg3 *);
1174
1175 static void tg3_power_down_phy(struct tg3 *tp)
1176 {
1177         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1178                 return;
1179
1180         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1181                 u32 val;
1182
1183                 tg3_bmcr_reset(tp);
1184                 val = tr32(GRC_MISC_CFG);
1185                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1186                 udelay(40);
1187                 return;
1188         } else {
1189                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1190                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1191                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1192         }
1193
1194         /* The PHY should not be powered down on some chips because
1195          * of bugs.
1196          */
1197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1198             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1199             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1200              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1201                 return;
1202         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1203 }
1204
/* Transition the chip to the requested PCI power state, arming WoL,
 * adjusting core clocks, and powering the PHY down as appropriate.
 * D0 is a fast path (just write the PM register and switch off Vaux);
 * D1/D2/D3hot run the full shutdown sequence.  Returns 0 or -EINVAL
 * for an unknown state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Drop copper links to 10/half to minimize power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll the firmware mailbox for up to ~200ms.
		 * NOTE(review): presumably the firmware posts
		 * ~MAGIC1 when it is ready — confirm.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware the driver is shutting down with WoL armed. */
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Configure the MAC port mode for wake-up traffic. */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection when PME from D3cold is
		 * supported.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Per-family clock gating while asleep. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Staged writes — the clock bits must be applied in order. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down if neither WoL nor ASF needs it awake. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1416
1417 static void tg3_link_report(struct tg3 *tp)
1418 {
1419         if (!netif_carrier_ok(tp->dev)) {
1420                 if (netif_msg_link(tp))
1421                         printk(KERN_INFO PFX "%s: Link is down.\n",
1422                                tp->dev->name);
1423         } else if (netif_msg_link(tp)) {
1424                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1425                        tp->dev->name,
1426                        (tp->link_config.active_speed == SPEED_1000 ?
1427                         1000 :
1428                         (tp->link_config.active_speed == SPEED_100 ?
1429                          100 : 10)),
1430                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1431                         "full" : "half"));
1432
1433                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1434                        "%s for RX.\n",
1435                        tp->dev->name,
1436                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1437                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1438         }
1439 }
1440
1441 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1442 {
1443         u32 new_tg3_flags = 0;
1444         u32 old_rx_mode = tp->rx_mode;
1445         u32 old_tx_mode = tp->tx_mode;
1446
1447         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1448
1449                 /* Convert 1000BaseX flow control bits to 1000BaseT
1450                  * bits before resolving flow control.
1451                  */
1452                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1453                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1454                                        ADVERTISE_PAUSE_ASYM);
1455                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1456
1457                         if (local_adv & ADVERTISE_1000XPAUSE)
1458                                 local_adv |= ADVERTISE_PAUSE_CAP;
1459                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1460                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1461                         if (remote_adv & LPA_1000XPAUSE)
1462                                 remote_adv |= LPA_PAUSE_CAP;
1463                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1464                                 remote_adv |= LPA_PAUSE_ASYM;
1465                 }
1466
1467                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1468                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1469                                 if (remote_adv & LPA_PAUSE_CAP)
1470                                         new_tg3_flags |=
1471                                                 (TG3_FLAG_RX_PAUSE |
1472                                                 TG3_FLAG_TX_PAUSE);
1473                                 else if (remote_adv & LPA_PAUSE_ASYM)
1474                                         new_tg3_flags |=
1475                                                 (TG3_FLAG_RX_PAUSE);
1476                         } else {
1477                                 if (remote_adv & LPA_PAUSE_CAP)
1478                                         new_tg3_flags |=
1479                                                 (TG3_FLAG_RX_PAUSE |
1480                                                 TG3_FLAG_TX_PAUSE);
1481                         }
1482                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1483                         if ((remote_adv & LPA_PAUSE_CAP) &&
1484                         (remote_adv & LPA_PAUSE_ASYM))
1485                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1486                 }
1487
1488                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1489                 tp->tg3_flags |= new_tg3_flags;
1490         } else {
1491                 new_tg3_flags = tp->tg3_flags;
1492         }
1493
1494         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1495                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1496         else
1497                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1498
1499         if (old_rx_mode != tp->rx_mode) {
1500                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1501         }
1502
1503         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1504                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1505         else
1506                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1507
1508         if (old_tx_mode != tp->tx_mode) {
1509                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1510         }
1511 }
1512
1513 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1514 {
1515         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1516         case MII_TG3_AUX_STAT_10HALF:
1517                 *speed = SPEED_10;
1518                 *duplex = DUPLEX_HALF;
1519                 break;
1520
1521         case MII_TG3_AUX_STAT_10FULL:
1522                 *speed = SPEED_10;
1523                 *duplex = DUPLEX_FULL;
1524                 break;
1525
1526         case MII_TG3_AUX_STAT_100HALF:
1527                 *speed = SPEED_100;
1528                 *duplex = DUPLEX_HALF;
1529                 break;
1530
1531         case MII_TG3_AUX_STAT_100FULL:
1532                 *speed = SPEED_100;
1533                 *duplex = DUPLEX_FULL;
1534                 break;
1535
1536         case MII_TG3_AUX_STAT_1000HALF:
1537                 *speed = SPEED_1000;
1538                 *duplex = DUPLEX_HALF;
1539                 break;
1540
1541         case MII_TG3_AUX_STAT_1000FULL:
1542                 *speed = SPEED_1000;
1543                 *duplex = DUPLEX_FULL;
1544                 break;
1545
1546         default:
1547                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1548                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1549                                  SPEED_10;
1550                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1551                                   DUPLEX_HALF;
1552                         break;
1553                 }
1554                 *speed = SPEED_INVALID;
1555                 *duplex = DUPLEX_INVALID;
1556                 break;
1557         };
1558 }
1559
1560 static void tg3_phy_copper_begin(struct tg3 *tp)
1561 {
1562         u32 new_adv;
1563         int i;
1564
1565         if (tp->link_config.phy_is_low_power) {
1566                 /* Entering low power mode.  Disable gigabit and
1567                  * 100baseT advertisements.
1568                  */
1569                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1570
1571                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1572                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1573                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1574                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1575
1576                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1577         } else if (tp->link_config.speed == SPEED_INVALID) {
1578                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1579                         tp->link_config.advertising &=
1580                                 ~(ADVERTISED_1000baseT_Half |
1581                                   ADVERTISED_1000baseT_Full);
1582
1583                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1584                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1585                         new_adv |= ADVERTISE_10HALF;
1586                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1587                         new_adv |= ADVERTISE_10FULL;
1588                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1589                         new_adv |= ADVERTISE_100HALF;
1590                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1591                         new_adv |= ADVERTISE_100FULL;
1592                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1593
1594                 if (tp->link_config.advertising &
1595                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1596                         new_adv = 0;
1597                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1598                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1599                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1600                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1601                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1602                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1603                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1604                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1605                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1606                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1607                 } else {
1608                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1609                 }
1610         } else {
1611                 /* Asking for a specific link mode. */
1612                 if (tp->link_config.speed == SPEED_1000) {
1613                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1614                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1615
1616                         if (tp->link_config.duplex == DUPLEX_FULL)
1617                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1618                         else
1619                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1620                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1621                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1622                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1623                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1624                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1625                 } else {
1626                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1627
1628                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1629                         if (tp->link_config.speed == SPEED_100) {
1630                                 if (tp->link_config.duplex == DUPLEX_FULL)
1631                                         new_adv |= ADVERTISE_100FULL;
1632                                 else
1633                                         new_adv |= ADVERTISE_100HALF;
1634                         } else {
1635                                 if (tp->link_config.duplex == DUPLEX_FULL)
1636                                         new_adv |= ADVERTISE_10FULL;
1637                                 else
1638                                         new_adv |= ADVERTISE_10HALF;
1639                         }
1640                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1641                 }
1642         }
1643
1644         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1645             tp->link_config.speed != SPEED_INVALID) {
1646                 u32 bmcr, orig_bmcr;
1647
1648                 tp->link_config.active_speed = tp->link_config.speed;
1649                 tp->link_config.active_duplex = tp->link_config.duplex;
1650
1651                 bmcr = 0;
1652                 switch (tp->link_config.speed) {
1653                 default:
1654                 case SPEED_10:
1655                         break;
1656
1657                 case SPEED_100:
1658                         bmcr |= BMCR_SPEED100;
1659                         break;
1660
1661                 case SPEED_1000:
1662                         bmcr |= TG3_BMCR_SPEED1000;
1663                         break;
1664                 };
1665
1666                 if (tp->link_config.duplex == DUPLEX_FULL)
1667                         bmcr |= BMCR_FULLDPLX;
1668
1669                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1670                     (bmcr != orig_bmcr)) {
1671                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1672                         for (i = 0; i < 1500; i++) {
1673                                 u32 tmp;
1674
1675                                 udelay(10);
1676                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1677                                     tg3_readphy(tp, MII_BMSR, &tmp))
1678                                         continue;
1679                                 if (!(tmp & BMSR_LSTATUS)) {
1680                                         udelay(40);
1681                                         break;
1682                                 }
1683                         }
1684                         tg3_writephy(tp, MII_BMCR, bmcr);
1685                         udelay(40);
1686                 }
1687         } else {
1688                 tg3_writephy(tp, MII_BMCR,
1689                              BMCR_ANENABLE | BMCR_ANRESTART);
1690         }
1691 }
1692
1693 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1694 {
1695         int err;
1696
1697         /* Turn off tap power management. */
1698         /* Set Extended packet length bit */
1699         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1700
1701         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1702         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1703
1704         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1705         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1706
1707         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1708         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1709
1710         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1711         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1712
1713         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1714         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1715
1716         udelay(40);
1717
1718         return err;
1719 }
1720
1721 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1722 {
1723         u32 adv_reg, all_mask = 0;
1724
1725         if (mask & ADVERTISED_10baseT_Half)
1726                 all_mask |= ADVERTISE_10HALF;
1727         if (mask & ADVERTISED_10baseT_Full)
1728                 all_mask |= ADVERTISE_10FULL;
1729         if (mask & ADVERTISED_100baseT_Half)
1730                 all_mask |= ADVERTISE_100HALF;
1731         if (mask & ADVERTISED_100baseT_Full)
1732                 all_mask |= ADVERTISE_100FULL;
1733
1734         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1735                 return 0;
1736
1737         if ((adv_reg & all_mask) != all_mask)
1738                 return 0;
1739         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1740                 u32 tg3_ctrl;
1741
1742                 all_mask = 0;
1743                 if (mask & ADVERTISED_1000baseT_Half)
1744                         all_mask |= ADVERTISE_1000HALF;
1745                 if (mask & ADVERTISED_1000baseT_Full)
1746                         all_mask |= ADVERTISE_1000FULL;
1747
1748                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1749                         return 0;
1750
1751                 if ((tg3_ctrl & all_mask) != all_mask)
1752                         return 0;
1753         }
1754         return 1;
1755 }
1756
/* Bring up / re-check the link on a copper PHY and program the MAC to
 * match.  Called with @force_reset non-zero to unconditionally reset
 * the PHY first.  Determines link state, speed and duplex, applies
 * flow control when full-duplex autoneg succeeded, reprograms MAC_MODE
 * accordingly, and updates the netdev carrier state.  Returns 0 on
 * success or a negative error from the 5401 DSP init path.
 *
 * NOTE: MII BMSR latches link-down events, which is why this function
 * consistently reads BMSR twice and uses the second value.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC event generation and clear latched status bits
	 * before poking the PHY.
	 */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Put the MI interface back in its base mode. */
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP workaround
			 * coefficients and wait up to 10ms for link.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a full PHY reset
			 * plus DSP reload if link still did not come up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Program the PHY interrupt mask: link-change only when using
	 * MI interrupts, otherwise mask everything (except on 5906).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of shadow register 0x4007 is set; if it
		 * was not, set it and go straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 iterations) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads until we get a plausible value
		 * (non-zero and not all-ones).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY matches
			 * exactly what was requested.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* Link is down (or we are in low power): reprogram the PHY and
	 * re-sample link status once.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select MII vs GMII port mode to match the active speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* Link LED polarity differs on 5700 vs later chips. */
	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Notify on-chip firmware of gigabit link-up on 5700 in
	 * PCI-X or high-speed PCI mode via the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate the new link state to the net device. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2036
/* Software state for the fiber auto-negotiation state machine driven by
 * tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control/status bits below; the MR_LP_ADV_*
			 * bits record what the link partner advertised
			 */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks: cur_time is incremented on
	 * every tg3_fiber_aneg_smachine() call; link_time records when
	 * the current state was entered.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word compared */
	int ability_match_count;	/* consecutive matching rx configs */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return values from tg3_fiber_aneg_smachine(): keep stepping (OK),
 * negotiation finished (DONE), caller should arm the aneg timer
 * (TIMER_ENAB), or negotiation failed (FAILED).
 */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* How long (in state-machine ticks) a state must settle before the
 * machine advances.
 */
#define ANEG_STATE_SETTLE_TIME  10000
2100
2101 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2102                                    struct tg3_fiber_aneginfo *ap)
2103 {
2104         unsigned long delta;
2105         u32 rx_cfg_reg;
2106         int ret;
2107
2108         if (ap->state == ANEG_STATE_UNKNOWN) {
2109                 ap->rxconfig = 0;
2110                 ap->link_time = 0;
2111                 ap->cur_time = 0;
2112                 ap->ability_match_cfg = 0;
2113                 ap->ability_match_count = 0;
2114                 ap->ability_match = 0;
2115                 ap->idle_match = 0;
2116                 ap->ack_match = 0;
2117         }
2118         ap->cur_time++;
2119
2120         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2121                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2122
2123                 if (rx_cfg_reg != ap->ability_match_cfg) {
2124                         ap->ability_match_cfg = rx_cfg_reg;
2125                         ap->ability_match = 0;
2126                         ap->ability_match_count = 0;
2127                 } else {
2128                         if (++ap->ability_match_count > 1) {
2129                                 ap->ability_match = 1;
2130                                 ap->ability_match_cfg = rx_cfg_reg;
2131                         }
2132                 }
2133                 if (rx_cfg_reg & ANEG_CFG_ACK)
2134                         ap->ack_match = 1;
2135                 else
2136                         ap->ack_match = 0;
2137
2138                 ap->idle_match = 0;
2139         } else {
2140                 ap->idle_match = 1;
2141                 ap->ability_match_cfg = 0;
2142                 ap->ability_match_count = 0;
2143                 ap->ability_match = 0;
2144                 ap->ack_match = 0;
2145
2146                 rx_cfg_reg = 0;
2147         }
2148
2149         ap->rxconfig = rx_cfg_reg;
2150         ret = ANEG_OK;
2151
2152         switch(ap->state) {
2153         case ANEG_STATE_UNKNOWN:
2154                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2155                         ap->state = ANEG_STATE_AN_ENABLE;
2156
2157                 /* fallthru */
2158         case ANEG_STATE_AN_ENABLE:
2159                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2160                 if (ap->flags & MR_AN_ENABLE) {
2161                         ap->link_time = 0;
2162                         ap->cur_time = 0;
2163                         ap->ability_match_cfg = 0;
2164                         ap->ability_match_count = 0;
2165                         ap->ability_match = 0;
2166                         ap->idle_match = 0;
2167                         ap->ack_match = 0;
2168
2169                         ap->state = ANEG_STATE_RESTART_INIT;
2170                 } else {
2171                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2172                 }
2173                 break;
2174
2175         case ANEG_STATE_RESTART_INIT:
2176                 ap->link_time = ap->cur_time;
2177                 ap->flags &= ~(MR_NP_LOADED);
2178                 ap->txconfig = 0;
2179                 tw32(MAC_TX_AUTO_NEG, 0);
2180                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2181                 tw32_f(MAC_MODE, tp->mac_mode);
2182                 udelay(40);
2183
2184                 ret = ANEG_TIMER_ENAB;
2185                 ap->state = ANEG_STATE_RESTART;
2186
2187                 /* fallthru */
2188         case ANEG_STATE_RESTART:
2189                 delta = ap->cur_time - ap->link_time;
2190                 if (delta > ANEG_STATE_SETTLE_TIME) {
2191                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2192                 } else {
2193                         ret = ANEG_TIMER_ENAB;
2194                 }
2195                 break;
2196
2197         case ANEG_STATE_DISABLE_LINK_OK:
2198                 ret = ANEG_DONE;
2199                 break;
2200
2201         case ANEG_STATE_ABILITY_DETECT_INIT:
2202                 ap->flags &= ~(MR_TOGGLE_TX);
2203                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2204                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2205                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2206                 tw32_f(MAC_MODE, tp->mac_mode);
2207                 udelay(40);
2208
2209                 ap->state = ANEG_STATE_ABILITY_DETECT;
2210                 break;
2211
2212         case ANEG_STATE_ABILITY_DETECT:
2213                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2214                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2215                 }
2216                 break;
2217
2218         case ANEG_STATE_ACK_DETECT_INIT:
2219                 ap->txconfig |= ANEG_CFG_ACK;
2220                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2221                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2222                 tw32_f(MAC_MODE, tp->mac_mode);
2223                 udelay(40);
2224
2225                 ap->state = ANEG_STATE_ACK_DETECT;
2226
2227                 /* fallthru */
2228         case ANEG_STATE_ACK_DETECT:
2229                 if (ap->ack_match != 0) {
2230                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2231                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2232                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2233                         } else {
2234                                 ap->state = ANEG_STATE_AN_ENABLE;
2235                         }
2236                 } else if (ap->ability_match != 0 &&
2237                            ap->rxconfig == 0) {
2238                         ap->state = ANEG_STATE_AN_ENABLE;
2239                 }
2240                 break;
2241
2242         case ANEG_STATE_COMPLETE_ACK_INIT:
2243                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2244                         ret = ANEG_FAILED;
2245                         break;
2246                 }
2247                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2248                                MR_LP_ADV_HALF_DUPLEX |
2249                                MR_LP_ADV_SYM_PAUSE |
2250                                MR_LP_ADV_ASYM_PAUSE |
2251                                MR_LP_ADV_REMOTE_FAULT1 |
2252                                MR_LP_ADV_REMOTE_FAULT2 |
2253                                MR_LP_ADV_NEXT_PAGE |
2254                                MR_TOGGLE_RX |
2255                                MR_NP_RX);
2256                 if (ap->rxconfig & ANEG_CFG_FD)
2257                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2258                 if (ap->rxconfig & ANEG_CFG_HD)
2259                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2260                 if (ap->rxconfig & ANEG_CFG_PS1)
2261                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2262                 if (ap->rxconfig & ANEG_CFG_PS2)
2263                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2264                 if (ap->rxconfig & ANEG_CFG_RF1)
2265                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2266                 if (ap->rxconfig & ANEG_CFG_RF2)
2267                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2268                 if (ap->rxconfig & ANEG_CFG_NP)
2269                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2270
2271                 ap->link_time = ap->cur_time;
2272
2273                 ap->flags ^= (MR_TOGGLE_TX);
2274                 if (ap->rxconfig & 0x0008)
2275                         ap->flags |= MR_TOGGLE_RX;
2276                 if (ap->rxconfig & ANEG_CFG_NP)
2277                         ap->flags |= MR_NP_RX;
2278                 ap->flags |= MR_PAGE_RX;
2279
2280                 ap->state = ANEG_STATE_COMPLETE_ACK;
2281                 ret = ANEG_TIMER_ENAB;
2282                 break;
2283
2284         case ANEG_STATE_COMPLETE_ACK:
2285                 if (ap->ability_match != 0 &&
2286                     ap->rxconfig == 0) {
2287                         ap->state = ANEG_STATE_AN_ENABLE;
2288                         break;
2289                 }
2290                 delta = ap->cur_time - ap->link_time;
2291                 if (delta > ANEG_STATE_SETTLE_TIME) {
2292                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2293                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2294                         } else {
2295                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2296                                     !(ap->flags & MR_NP_RX)) {
2297                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2298                                 } else {
2299                                         ret = ANEG_FAILED;
2300                                 }
2301                         }
2302                 }
2303                 break;
2304
2305         case ANEG_STATE_IDLE_DETECT_INIT:
2306                 ap->link_time = ap->cur_time;
2307                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2308                 tw32_f(MAC_MODE, tp->mac_mode);
2309                 udelay(40);
2310
2311                 ap->state = ANEG_STATE_IDLE_DETECT;
2312                 ret = ANEG_TIMER_ENAB;
2313                 break;
2314
2315         case ANEG_STATE_IDLE_DETECT:
2316                 if (ap->ability_match != 0 &&
2317                     ap->rxconfig == 0) {
2318                         ap->state = ANEG_STATE_AN_ENABLE;
2319                         break;
2320                 }
2321                 delta = ap->cur_time - ap->link_time;
2322                 if (delta > ANEG_STATE_SETTLE_TIME) {
2323                         /* XXX another gem from the Broadcom driver :( */
2324                         ap->state = ANEG_STATE_LINK_OK;
2325                 }
2326                 break;
2327
2328         case ANEG_STATE_LINK_OK:
2329                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2330                 ret = ANEG_DONE;
2331                 break;
2332
2333         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2334                 /* ??? unimplemented */
2335                 break;
2336
2337         case ANEG_STATE_NEXT_PAGE_WAIT:
2338                 /* ??? unimplemented */
2339                 break;
2340
2341         default:
2342                 ret = ANEG_FAILED;
2343                 break;
2344         };
2345
2346         return ret;
2347 }
2348
2349 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2350 {
2351         int res = 0;
2352         struct tg3_fiber_aneginfo aninfo;
2353         int status = ANEG_FAILED;
2354         unsigned int tick;
2355         u32 tmp;
2356
2357         tw32_f(MAC_TX_AUTO_NEG, 0);
2358
2359         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2360         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2361         udelay(40);
2362
2363         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2364         udelay(40);
2365
2366         memset(&aninfo, 0, sizeof(aninfo));
2367         aninfo.flags |= MR_AN_ENABLE;
2368         aninfo.state = ANEG_STATE_UNKNOWN;
2369         aninfo.cur_time = 0;
2370         tick = 0;
2371         while (++tick < 195000) {
2372                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2373                 if (status == ANEG_DONE || status == ANEG_FAILED)
2374                         break;
2375
2376                 udelay(1);
2377         }
2378
2379         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2380         tw32_f(MAC_MODE, tp->mac_mode);
2381         udelay(40);
2382
2383         *flags = aninfo.flags;
2384
2385         if (status == ANEG_DONE &&
2386             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2387                              MR_LP_ADV_FULL_DUPLEX)))
2388                 res = 1;
2389
2390         return res;
2391 }
2392
/* One-time initialization of the BCM8002 SerDes PHY: software reset,
 * PLL/comdet programming via vendor-specific registers, a POR pulse,
 * then a settle delay.  The register numbers and values are Broadcom
 * magic; the write ordering is part of the procedure -- do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link.
	 * (I.e. skip the reset only on a re-init with no PCS sync.)
	 */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (busy-wait, ~5ms). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (busy-wait, ~150ms). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2442
/* Link setup for fiber parts whose autonegotiation is handled by the
 * SG_DIG hardware block (5704S-style).  Programs SG_DIG_CTRL from
 * tp->link_config, applies a MAC_SERDES_CFG workaround on chips newer
 * than 5704-A1, and falls back to parallel detection when the link
 * partner is not sending config code words.  Returns nonzero (link up)
 * or zero (link down).
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg is currently enabled
		 * (bit 31 of SG_DIG_CTRL), switch it off.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Link came up via parallel detect earlier: keep it up
		 * while we stay PCS-synced and receive no config words,
		 * decrementing the timeout each pass.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the restart bit (30), then (re)start autoneg. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* Bit 1 of SG_DIG_STATUS: presumably "autoneg complete";
		 * bits 19/20: partner pause / asym-pause ability --
		 * TODO confirm against the SG_DIG register docs.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: wait out the counter,
			 * then try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: re-arm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2579
/* Link setup for fiber parts without the SG_DIG hardware autoneg block.
 * With autoneg enabled, drives the software autoneg state machine via
 * fiber_autoneg(); otherwise forces a 1000FD link.  Returns nonzero
 * when link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync: no usable signal, link stays down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* We advertise symmetric pause only; translate the
			 * partner's MR_* flags into MII LPA pause bits.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack the SYNC/CFG changed bits until they stop
		 * re-asserting (bounded), i.e. the link has settled.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		/* Autoneg did not finish, but we are synced and the
		 * partner sends no config words: treat as link up
		 * (parallel-detection style).
		 */
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2637
/* Bring up / re-check the link on a fiber (TBI) interface.  Selects
 * hardware (SG_DIG) or software autonegotiation, updates MAC mode and
 * the link LEDs, and reports carrier / link-parameter changes to the
 * network stack.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we can tell later whether a
	 * link report is warranted even without a carrier transition.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg link already up and clean --
	 * just ack the changed bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Fiber runs the MAC in TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the stale link-change flag in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the changed bits until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to prod the link partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* A fiber link is always 1000 full duplex when up. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate a carrier transition; if carrier is unchanged,
	 * still report when pause/speed/duplex settings moved.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2753
/* Link setup for fiber parts driven through an MII-accessible PHY
 * (5714S-style).  Programs 1000BASE-X advertisement / forced mode over
 * MII, derives link state from BMSR (with a 5714 MAC_TX_STATUS
 * override), updates MAC mode, and reports carrier changes.  Returns
 * the OR of the MII access error codes.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending status-changed bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for the current
	 * value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714, trust the MAC's view of the link instead. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement word. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed or autoneg was off: restart
		 * autoneg and let a later poll evaluate the result.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: compose the desired BMCR. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample link status (latched-low, 5714
			 * override as above).
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * common subset of both advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests the *previous* active_duplex; the
	 * freshly determined current_duplex is only stored below.
	 * Looks like a longstanding quirk -- confirm before changing.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2920
/* Periodic helper for fiber-MII parts: after the autoneg timeout
 * expires, fall back to parallel detection when the partner is not
 * autonegotiating; if config code words show up again later, switch
 * autoneg back on.  Uses Broadcom shadow/expansion PHY registers.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read -- presumably the first clears a
			 * latched value; TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2978
2979 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2980 {
2981         int err;
2982
2983         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2984                 err = tg3_setup_fiber_phy(tp, force_reset);
2985         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2986                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2987         } else {
2988                 err = tg3_setup_copper_phy(tp, force_reset);
2989         }
2990
2991         if (tp->link_config.active_speed == SPEED_1000 &&
2992             tp->link_config.active_duplex == DUPLEX_HALF)
2993                 tw32(MAC_TX_LENGTHS,
2994                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2995                       (6 << TX_LENGTHS_IPG_SHIFT) |
2996                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2997         else
2998                 tw32(MAC_TX_LENGTHS,
2999                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3000                       (6 << TX_LENGTHS_IPG_SHIFT) |
3001                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3002
3003         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3004                 if (netif_carrier_ok(tp->dev)) {
3005                         tw32(HOSTCC_STAT_COAL_TICKS,
3006                              tp->coal.stats_block_coalesce_usecs);
3007                 } else {
3008                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3009                 }
3010         }
3011
3012         return err;
3013 }
3014
3015 /* This is called whenever we suspect that the system chipset is re-
3016  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3017  * is bogus tx completions. We try to recover by setting the
3018  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3019  * in the workqueue.
3020  */
3021 static void tg3_tx_recover(struct tg3 *tp)
3022 {
3023         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3024                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3025
3026         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3027                "mapped I/O cycles to the network device, attempting to "
3028                "recover. Please report the problem to the driver maintainer "
3029                "and include system chipset information.\n", tp->dev->name);
3030
3031         spin_lock(&tp->lock);
3032         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3033         spin_unlock(&tp->lock);
3034 }
3035
3036 static inline u32 tg3_tx_avail(struct tg3 *tp)
3037 {
3038         smp_mb();
3039         return (tp->tx_pending -
3040                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3041 }
3042
3043 /* Tigon3 never reports partial packet sends.  So we do not
3044  * need special logic to handle SKBs that have not had all
3045  * of their frags sent yet, like SunGEM does.
3046  */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Walk from our software consumer up to the hardware consumer,
	 * unmapping and freeing each completed skb.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb here means the hardware reported a completion
		 * for a slot we never filled — symptom of MMIO reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each fragment occupies its own descriptor slot. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Frag slots must be empty and must not run past the
			 * hardware index; otherwise flag the inconsistency.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with the xmit
	 * path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3110
3111 /* Returns size of skb allocated or < 0 on error.
3112  *
3113  * We only need to fill in the address because the other members
3114  * of the RX descriptor are invariant, see tg3_init_rings.
3115  *
3116  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3117  * posting buffers we only dirty the first cache line of the RX
3118  * descriptor (containing the address).  Whereas for the RX status
3119  * buffers the cpu only reads the last cacheline of the RX descriptor
3120  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3121  */
3122 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3123                             int src_idx, u32 dest_idx_unmasked)
3124 {
3125         struct tg3_rx_buffer_desc *desc;
3126         struct ring_info *map, *src_map;
3127         struct sk_buff *skb;
3128         dma_addr_t mapping;
3129         int skb_size, dest_idx;
3130
3131         src_map = NULL;
3132         switch (opaque_key) {
3133         case RXD_OPAQUE_RING_STD:
3134                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3135                 desc = &tp->rx_std[dest_idx];
3136                 map = &tp->rx_std_buffers[dest_idx];
3137                 if (src_idx >= 0)
3138                         src_map = &tp->rx_std_buffers[src_idx];
3139                 skb_size = tp->rx_pkt_buf_sz;
3140                 break;
3141
3142         case RXD_OPAQUE_RING_JUMBO:
3143                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3144                 desc = &tp->rx_jumbo[dest_idx];
3145                 map = &tp->rx_jumbo_buffers[dest_idx];
3146                 if (src_idx >= 0)
3147                         src_map = &tp->rx_jumbo_buffers[src_idx];
3148                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3149                 break;
3150
3151         default:
3152                 return -EINVAL;
3153         };
3154
3155         /* Do not overwrite any of the map or rp information
3156          * until we are sure we can commit to a new buffer.
3157          *
3158          * Callers depend upon this behavior and assume that
3159          * we leave everything unchanged if we fail.
3160          */
3161         skb = netdev_alloc_skb(tp->dev, skb_size);
3162         if (skb == NULL)
3163                 return -ENOMEM;
3164
3165         skb_reserve(skb, tp->rx_offset);
3166
3167         mapping = pci_map_single(tp->pdev, skb->data,
3168                                  skb_size - tp->rx_offset,
3169                                  PCI_DMA_FROMDEVICE);
3170
3171         map->skb = skb;
3172         pci_unmap_addr_set(map, mapping, mapping);
3173
3174         if (src_map != NULL)
3175                 src_map->skb = NULL;
3176
3177         desc->addr_hi = ((u64)mapping >> 32);
3178         desc->addr_lo = ((u64)mapping & 0xffffffff);
3179
3180         return skb_size;
3181 }
3182
3183 /* We only need to move over in the address because the other
3184  * members of the RX descriptor are invariant.  See notes above
3185  * tg3_alloc_rx_skb for full details.
3186  */
3187 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3188                            int src_idx, u32 dest_idx_unmasked)
3189 {
3190         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3191         struct ring_info *src_map, *dest_map;
3192         int dest_idx;
3193
3194         switch (opaque_key) {
3195         case RXD_OPAQUE_RING_STD:
3196                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3197                 dest_desc = &tp->rx_std[dest_idx];
3198                 dest_map = &tp->rx_std_buffers[dest_idx];
3199                 src_desc = &tp->rx_std[src_idx];
3200                 src_map = &tp->rx_std_buffers[src_idx];
3201                 break;
3202
3203         case RXD_OPAQUE_RING_JUMBO:
3204                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3205                 dest_desc = &tp->rx_jumbo[dest_idx];
3206                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3207                 src_desc = &tp->rx_jumbo[src_idx];
3208                 src_map = &tp->rx_jumbo_buffers[src_idx];
3209                 break;
3210
3211         default:
3212                 return;
3213         };
3214
3215         dest_map->skb = src_map->skb;
3216         pci_unmap_addr_set(dest_map, mapping,
3217                            pci_unmap_addr(src_map, mapping));
3218         dest_desc->addr_hi = src_desc->addr_hi;
3219         dest_desc->addr_lo = src_desc->addr_lo;
3220
3221         src_map->skb = NULL;
3222 }
3223
3224 #if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the hardware-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
3229 #endif
3230
3231 /* The RX ring scheme is composed of multiple rings which post fresh
3232  * buffers to the chip, and one special ring the chip uses to report
3233  * status back to the host.
3234  *
3235  * The special ring reports the status of received packets to the
3236  * host.  The chip does not write into the original descriptor the
3237  * RX buffer was obtained from.  The chip simply takes the original
3238  * descriptor as provided by the host, updates the status and length
3239  * field, then writes this into the next status ring entry.
3240  *
3241  * Each ring the host uses to post buffers to the chip is described
3242  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3243  * it is first placed into the on-chip ram.  When the packet's length
3244  * is known, it walks down the TG3_BDINFO entries to select the ring.
3245  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3246  * which is within the range of the new packet's length is chosen.
3247  *
3248  * The "separate ring for rx status" scheme may sound queer, but it makes
3249  * sense from a cache coherency perspective.  If only the host writes
3250  * to the buffer post rings, and only the chip writes to the rx status
3251  * rings, then cache lines never move beyond shared-modified state.
3252  * If both the host and chip were to write into the same ring, cache line
3253  * eviction could occur since both entities want it in an exclusive state.
3254  */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring the
		 * buffer came from and its index in that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a fresh buffer to the ring and
			 * hand the original DMA buffer up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically kick the std producer mailbox so the chip
		 * does not starve for buffers during a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3410
/* NAPI poll callback.  Returns 0 when all work is done (and the device
 * has been removed from the poll list), 1 when more work remains.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* TX recovery pending means tg3_tx() saw an inconsistent
		 * ring; stop polling and let the reset task take over.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Record the status tag we have processed (tagged-status mode) or
	 * clear the updated bit (legacy mode).
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3472
/* Mark the IRQ handler as synchronized-against and wait for any handler
 * already running on another CPU to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3482
/* Non-zero while the driver is quiescing IRQs; handlers check this and
 * refrain from scheduling NAPI work.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3487
3488 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3489  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3490  * with as well.  Most of the time, this is not necessary except when
3491  * shutting down the device.
3492  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Quiesce the IRQ handler first if requested, then take the main
	 * lock with bottom halves disabled.
	 */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3499
/* Counterpart of tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3504
3505 /* One-shot MSI handler - Chip automatically disables interrupt
3506  * after sending MSI so driver doesn't have to do it.
3507  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cachelines NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3521
3522 /* MSI ISR - No need to check for interrupt sharing and no need to
3523  * flush status block and interrupt mailbox. PCI ordering rules
3524  * guarantee that MSI will arrive after the status block.
3525  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cachelines NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3546
/* Legacy INTx interrupt handler (untagged status block). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3589
/* INTx interrupt handler for chips using tagged status blocks; a new
 * status_tag value indicates fresh work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3631
3632 /* ISR for interrupt test */
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Same ours-or-shared check as the real handler; on a hit just
	 * disable interrupts and report handled so the test can observe it.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3646
3647 static int tg3_init_hw(struct tg3 *, int);
3648 static int tg3_halt(struct tg3 *, int, int);
3649
3650 /* Restart hardware after configuration changes, self-test, etc.
3651  * Invoked with tp->lock held.
3652  */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the device.
		 * dev_close() must run without tp->lock held, so drop it
		 * around the teardown and retake it before returning, as
		 * the caller expects the lock to still be held.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3671
3672 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the INTx handler directly. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
3679 #endif
3680
/* Workqueue handler that fully resets and re-initializes the chip,
 * e.g. after a TX timeout or a detected MMIO-reorder problem.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_netif_stop() must be called without tp->lock held; retake
	 * the lock with IRQ quiescing (irq_sync=1) for the reset proper.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* If TX recovery was requested, switch to flushing mailbox writes
	 * before bringing the chip back up (see tg3_tx_recover()).
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3725
3726 static void tg3_tx_timeout(struct net_device *dev)
3727 {
3728         struct tg3 *tp = netdev_priv(dev);
3729
3730         if (netif_msg_tx_err(tp))
3731                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3732                        dev->name);
3733
3734         schedule_work(&tp->reset_task);
3735 }
3736
3737 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3738 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3739 {
3740         u32 base = (u32) mapping & 0xffffffff;
3741
3742         return ((base > 0xffffdcc0) &&
3743                 (base + len + 8 < base));
3744 }
3745
3746 /* Test for DMA addresses > 40-bit */
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
/* Only relevant where highmem pages can map above 40 bits; elsewhere the
 * DMA mask already constrains the address.
 */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3758
3759 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3760
3761 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Workaround 4GB and 40-bit hardware DMA bugs: replace an offending
 * (possibly fragmented) skb with a linear copy mapped at a safe address.
 * Returns 0 on success, -1 if the packet had to be dropped; in both
 * cases the original skb is consumed and the sw ring entries from
 * *start to last_plus_one are unmapped.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 holds the linear head; subsequent slots hold
		 * page fragments of the original skb.
		 */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot takes ownership of the replacement skb
			 * (NULL if we are dropping the packet).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3818
/* Fill in one hardware TX buffer descriptor in tp->tx_ring[entry].
 *
 * @mapping: DMA address of the buffer (split into hi/lo 32-bit halves).
 * @len: buffer length in bytes.
 * @flags: TXD_FLAG_* bits; if TXD_FLAG_VLAN is set, the upper 16 bits of
 *         @flags carry the VLAN tag (see the callers, which OR the tag in
 *         at bit 16).
 * @mss_and_is_end: bit 0 marks the last descriptor of the packet; the
 *         remaining bits (>> 1) are the MSS value for TSO.
 *
 * Caller owns the ring slot; no locking is done here.
 */
static void tg3_set_txd(struct tg3 *tp, int entry,
                        dma_addr_t mapping, int len, u32 flags,
                        u32 mss_and_is_end)
{
        struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
        int is_end = (mss_and_is_end & 0x1);
        u32 mss = (mss_and_is_end >> 1);
        u32 vlan_tag = 0;

        if (is_end)
                flags |= TXD_FLAG_END;
        if (flags & TXD_FLAG_VLAN) {
                /* VLAN tag rides in the upper half of @flags; the
                 * descriptor wants it in its own field.
                 */
                vlan_tag = flags >> 16;
                flags &= 0xffff;
        }
        /* MSS shares the vlan_tag descriptor word, shifted per hw layout. */
        vlan_tag |= (mss << TXD_MSS_SHIFT);

        txd->addr_hi = ((u64) mapping >> 32);
        txd->addr_lo = ((u64) mapping & 0xffffffff);
        txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
        txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
3841
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Returns NETDEV_TX_OK on success (including the silent-drop path when
 * TSO header expansion fails) or NETDEV_TX_BUSY when the ring is full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        /* Length of the linear (non-paged) head of the skb. */
        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* Non-zero gso_size on an over-MTU frame marks a TSO packet. */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* Headers are modified in place below, so the skb head
                 * must not be shared with a clone.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);     /* silent drop on OOM */
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        /* Encode the header length above bit 9 of mss;
                         * the exact split is the hardware's TSO format.
                         */
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                        ip_tcp_len = (skb->nh.iph->ihl * 4) +
                                     sizeof(struct tcphdr);

                        /* Pre-clear the IP checksum and stamp the
                         * per-segment tot_len for the hardware.
                         */
                        skb->nh.iph->check = 0;
                        skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
                                                     tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* HW TSO recomputes the TCP checksum per segment. */
                skb->h.th->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
        mss = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
        /* VLAN tag is carried in the upper 16 bits of base_flags; see
         * tg3_set_txd() which extracts it.
         */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* Low bit of the final argument marks the last descriptor. */
        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the head descriptor keeps the skb
                         * pointer; fragment slots stay NULL (cleanup
                         * walks skb_shinfo() for their lengths).
                         */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping: the reclaim path may have
                 * freed slots in between, so wake if above threshold.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
3967
#if TG3_TSO_SUPPORT != 0
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments @skb in software and pushes each resulting frame through the
 * normal DMA-bug xmit path.  Consumes @skb on all paths except the
 * ring-full early return (NETDEV_TX_BUSY).
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                return NETDEV_TX_BUSY;
        }

        /* Mask TSO out of the feature set so GSO splits in software. */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (unlikely(IS_ERR(segs)))
                goto tg3_tso_bug_end;   /* drop the original skb */

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;      /* detach before transmit */
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        /* The original skb has been fully replaced by the segments. */
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
#endif
4001
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but tracks whether any DMA mapping crosses a
 * 4GB boundary (or exceeds 40 bits on affected chips) and, if so, runs
 * tigon3_dma_hwbug_workaround() to re-linearize the packet.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        /* Length of the linear head of the skb. */
        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* Non-zero gso_size on an over-MTU frame marks a TSO packet. */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are modified in place; un-share a cloned head. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);     /* silent drop on OOM */
                        goto out_unlock;
                }

                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

                /* Headers over 80 bytes trip a hardware TSO bug on
                 * HW_TSO_1 parts; fall back to software GSO there.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                skb->nh.iph->check = 0;
                skb->nh.iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* HW TSO computes the checksum itself. */
                        skb->h.th->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                }
                else {
                        /* Firmware TSO wants the pseudo-header checksum
                         * pre-seeded (zero length/payload).
                         */
                        skb->h.th->check =
                                ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                   skb->nh.iph->daddr,
                                                   0, IPPROTO_TCP, 0);
                }

                /* IP/TCP option lengths are encoded differently per
                 * chip generation: in mss bits for HW_TSO/5705, in
                 * base_flags otherwise.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                base_flags |= tsflags << 12;
                        }
                }
        }
#else
        mss = 0;
#endif
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        /* Chip bug: a buffer crossing a 4GB DMA boundary is mishandled. */
        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Head descriptor owns the skb; fragment slots
                         * stay NULL for the cleanup path.
                         */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        /* Some chips can only DMA from 40-bit addresses. */
                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        /* Firmware TSO only gets the MSS on the first
                         * descriptor, hence the split here.
                         */
                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor used by this packet. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping; reclaim may have run. */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4182
4183 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4184                                int new_mtu)
4185 {
4186         dev->mtu = new_mtu;
4187
4188         if (new_mtu > ETH_DATA_LEN) {
4189                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4190                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4191                         ethtool_op_set_tso(dev, 0);
4192                 }
4193                 else
4194                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4195         } else {
4196                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4197                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4198                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4199         }
4200 }
4201
/* net_device change_mtu hook: validate @new_mtu, then apply it.  If the
 * interface is running the chip must be halted and reinitialized, since
 * ring geometry depends on the MTU.  Returns 0 or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Quiesce the device before taking the full lock and halting. */
        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* Only restart the net interface if the hw came back up. */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4235
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: only the first slot of each packet holds the skb
         * (the xmit paths leave fragment slots NULL), so walk packet
         * by packet, unmapping the head plus each fragment slot.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        /* Mask handles wrap-around at the ring end. */
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4307
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0, or -ENOMEM if not even one RX buffer could be allocated.
 * On partial allocation failure the pending counts are shrunk and the
 * ring still comes up.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips have no separate jumbo ring, so the standard
         * ring must use jumbo-sized buffers for large MTUs.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                /* Opaque cookie lets completion map back to ring+index. */
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        /* Run degraded with however many we got. */
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Total failure here also undoes
                                         * the std ring allocations above.
                                         */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4397
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every coherent DMA ring plus the kmalloc'd buffer-info
 * arrays.  Safe to call on a partially-allocated state (each pointer
 * is checked and NULLed), which is why tg3_alloc_consistent() uses it
 * as its error-cleanup path.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers live inside this single
         * allocation (see tg3_alloc_consistent), so one kfree covers
         * all three.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
4437
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the buffer-info arrays (one kmalloc carved into std/jumbo
 * rx info plus tx info) and all coherent DMA rings.  Returns 0 or
 * -ENOMEM; on failure everything already allocated is freed.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* Single allocation backing three arrays; see the carving below
         * and the single kfree in tg3_free_consistent().
         */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* The status block and stats block are read by the chip; start
         * them zeroed.
         */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
4499
4500 #define MAX_WAIT_CNT 1000
4501
4502 /* To stop a block, clear the enable bit and poll till it
4503  * clears.  tp->lock is held.
4504  */
4505 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4506 {
4507         unsigned int i;
4508         u32 val;
4509
4510         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4511                 switch (ofs) {
4512                 case RCVLSC_MODE:
4513                 case DMAC_MODE:
4514                 case MBFREE_MODE:
4515                 case BUFMGR_MODE:
4516                 case MEMARB_MODE:
4517                         /* We can't enable/disable these bits of the
4518                          * 5705/5750, just say success.
4519                          */
4520                         return 0;
4521
4522                 default:
4523                         break;
4524                 };
4525         }
4526
4527         val = tr32(ofs);
4528         val &= ~enable_bit;
4529         tw32_f(ofs, val);
4530
4531         for (i = 0; i < MAX_WAIT_CNT; i++) {
4532                 udelay(100);
4533                 val = tr32(ofs);
4534                 if ((val & enable_bit) == 0)
4535                         break;
4536         }
4537
4538         if (i == MAX_WAIT_CNT && !silent) {
4539                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4540                        "ofs=%lx enable_bit=%x\n",
4541                        ofs, enable_bit);
4542                 return -ENODEV;
4543         }
4544
4545         return 0;
4546 }
4547
/* tp->lock is held.
 *
 * Orderly shutdown of the chip's receive, transmit and DMA engines:
 * receive path first, then send path, then host coalescing / DMA /
 * memory blocks.  Errors from individual blocks are OR'd together;
 * @silent suppresses per-block timeout messages.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop taking new packets off the wire first. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* MAC_TX_MODE has no tg3_stop_block() entry, poll it by hand. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Reset all on-chip FTQs by pulsing the reset register. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear the host-visible status/stats so stale state is not
         * read after a restart.
         */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4610
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration semaphore (shared with the
 * on-chip firmware).  The lock is recursive via nvram_lock_cnt: only
 * the first acquisition touches the hardware.  Returns 0, or -ENODEV
 * if the grant never arrives (~160ms poll).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                /* Withdraw our request before giving up. */
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}
4633
4634 /* tp->lock is held. */
4635 static void tg3_nvram_unlock(struct tg3 *tp)
4636 {
4637         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4638                 if (tp->nvram_lock_cnt > 0)
4639                         tp->nvram_lock_cnt--;
4640                 if (tp->nvram_lock_cnt == 0)
4641                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4642         }
4643 }
4644
4645 /* tp->lock is held. */
4646 static void tg3_enable_nvram_access(struct tg3 *tp)
4647 {
4648         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4649             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4650                 u32 nvaccess = tr32(NVRAM_ACCESS);
4651
4652                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4653         }
4654 }
4655
4656 /* tp->lock is held. */
4657 static void tg3_disable_nvram_access(struct tg3 *tp)
4658 {
4659         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4660             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4661                 u32 nvaccess = tr32(NVRAM_ACCESS);
4662
4663                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4664         }
4665 }
4666
4667 /* tp->lock is held. */
4668 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4669 {
4670         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4671                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4672
4673         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4674                 switch (kind) {
4675                 case RESET_KIND_INIT:
4676                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4677                                       DRV_STATE_START);
4678                         break;
4679
4680                 case RESET_KIND_SHUTDOWN:
4681                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4682                                       DRV_STATE_UNLOAD);
4683                         break;
4684
4685                 case RESET_KIND_SUSPEND:
4686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4687                                       DRV_STATE_SUSPEND);
4688                         break;
4689
4690                 default:
4691                         break;
4692                 };
4693         }
4694 }
4695
4696 /* tp->lock is held. */
4697 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4698 {
4699         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4700                 switch (kind) {
4701                 case RESET_KIND_INIT:
4702                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4703                                       DRV_STATE_START_DONE);
4704                         break;
4705
4706                 case RESET_KIND_SHUTDOWN:
4707                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4708                                       DRV_STATE_UNLOAD_DONE);
4709                         break;
4710
4711                 default:
4712                         break;
4713                 };
4714         }
4715 }
4716
4717 /* tp->lock is held. */
4718 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4719 {
4720         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4721                 switch (kind) {
4722                 case RESET_KIND_INIT:
4723                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4724                                       DRV_STATE_START);
4725                         break;
4726
4727                 case RESET_KIND_SHUTDOWN:
4728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4729                                       DRV_STATE_UNLOAD);
4730                         break;
4731
4732                 case RESET_KIND_SUSPEND:
4733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4734                                       DRV_STATE_SUSPEND);
4735                         break;
4736
4737                 default:
4738                         break;
4739                 };
4740         }
4741 }
4742
/* Wait for the on-chip bootcode/firmware to finish initializing after
 * a reset.  Returns -ENODEV only on the 5906 (VCPU) timeout; on all
 * other chips a mailbox timeout is reported once via printk but is not
 * treated as an error, because some boards legitimately carry no
 * firmware (see comment below).
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * signals completion by writing the one's complement of the
	 * magic value back into the mailbox (up to 100000 * 10us = 1s).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
4781
4782 static void tg3_stop_fw(struct tg3 *);
4783
/* tp->lock is held. */
/* Perform a full core-clock chip reset and bring the device back to a
 * programmable state: restores PCI config space, MSI enable, memory
 * arbiter, GRC mode and MAC port mode, waits for bootcode via
 * tg3_poll_fw(), then re-probes the ASF enable state from NIC SRAM.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* NOTE(review): zeroing GRC_FASTBOOT_PC on 5752/5755/5787
	 * presumably forces the boot code to run from the start after
	 * reset -- confirm against the chip programming documentation.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			/* Also assert bit 29 (except on 5750 A0). */
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Flag a driver-initiated reset to the VCPU and make
		 * sure it is not held in halt across the reset.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode matching the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for the bootcode before touching NIC SRAM below. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4983
4984 /* tp->lock is held. */
4985 static void tg3_stop_fw(struct tg3 *tp)
4986 {
4987         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4988                 u32 val;
4989                 int i;
4990
4991                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4992                 val = tr32(GRC_RX_CPU_EVENT);
4993                 val |= (1 << 14);
4994                 tw32(GRC_RX_CPU_EVENT, val);
4995
4996                 /* Wait for RX cpu to ACK the event.  */
4997                 for (i = 0; i < 100; i++) {
4998                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4999                                 break;
5000                         udelay(1);
5001                 }
5002         }
5003 }
5004
/* Quiesce and reset the chip: stop the ASF firmware, signal the reset
 * kind pre- and post-reset, abort all DMA/MAC activity, and perform a
 * full chip reset.  tp->lock is held.  Returns the chip-reset status.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5025
5026 #define TG3_FW_RELEASE_MAJOR    0x0
5027 #define TG3_FW_RELASE_MINOR     0x0
5028 #define TG3_FW_RELEASE_FIX      0x0
5029 #define TG3_FW_START_ADDR       0x08000000
5030 #define TG3_FW_TEXT_ADDR        0x08000000
5031 #define TG3_FW_TEXT_LEN         0x9c0
5032 #define TG3_FW_RODATA_ADDR      0x080009c0
5033 #define TG3_FW_RODATA_LEN       0x60
5034 #define TG3_FW_DATA_ADDR        0x08000a40
5035 #define TG3_FW_DATA_LEN         0x20
5036 #define TG3_FW_SBSS_ADDR        0x08000a60
5037 #define TG3_FW_SBSS_LEN         0xc
5038 #define TG3_FW_BSS_ADDR         0x08000a70
5039 #define TG3_FW_BSS_LEN          0x10
5040
/* 5701 A0 workaround firmware .text image (MIPS machine code; see the
 * copyright/permission notice at the top of this file).  Loaded into
 * RX/TX CPU scratch memory by tg3_load_5701_a0_firmware_fix().  Do not
 * edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5134
/* 5701 A0 workaround firmware .rodata image.  The words encode ASCII
 * text (e.g. the first four spell "5701"); do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5142
/* The .data image is all zeros, so it is compiled out; the loader
 * writes zeros when the corresponding fw_info pointer is NULL (see
 * tg3_load_5701_a0_firmware_fix()).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5149
5150 #define RX_CPU_SCRATCH_BASE     0x30000
5151 #define RX_CPU_SCRATCH_SIZE     0x04000
5152 #define TX_CPU_SCRATCH_BASE     0x34000
5153 #define TX_CPU_SCRATCH_SIZE     0x04000
5154
5155 /* tp->lock is held. */
5156 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5157 {
5158         int i;
5159
5160         BUG_ON(offset == TX_CPU_BASE &&
5161             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5162
5163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5164                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5165
5166                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5167                 return 0;
5168         }
5169         if (offset == RX_CPU_BASE) {
5170                 for (i = 0; i < 10000; i++) {
5171                         tw32(offset + CPU_STATE, 0xffffffff);
5172                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5173                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5174                                 break;
5175                 }
5176
5177                 tw32(offset + CPU_STATE, 0xffffffff);
5178                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5179                 udelay(10);
5180         } else {
5181                 for (i = 0; i < 10000; i++) {
5182                         tw32(offset + CPU_STATE, 0xffffffff);
5183                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5184                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5185                                 break;
5186                 }
5187         }
5188
5189         if (i >= 10000) {
5190                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5191                        "and %s CPU\n",
5192                        tp->dev->name,
5193                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5194                 return -ENODEV;
5195         }
5196
5197         /* Clear firmware's nvram arbitration. */
5198         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5199                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5200         return 0;
5201 }
5202
/* Describes one firmware image to load into an on-chip CPU's scratch
 * memory: link-time base address, byte length and word data for each
 * of the .text, .rodata and .data sections.  A NULL data pointer
 * means the section is all zeros (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5214
5215 /* tp->lock is held. */
5216 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5217                                  int cpu_scratch_size, struct fw_info *info)
5218 {
5219         int err, lock_err, i;
5220         void (*write_op)(struct tg3 *, u32, u32);
5221
5222         if (cpu_base == TX_CPU_BASE &&
5223             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5224                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5225                        "TX cpu firmware on %s which is 5705.\n",
5226                        tp->dev->name);
5227                 return -EINVAL;
5228         }
5229
5230         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5231                 write_op = tg3_write_mem;
5232         else
5233                 write_op = tg3_write_indirect_reg32;
5234
5235         /* It is possible that bootcode is still loading at this point.
5236          * Get the nvram lock first before halting the cpu.
5237          */
5238         lock_err = tg3_nvram_lock(tp);
5239         err = tg3_halt_cpu(tp, cpu_base);
5240         if (!lock_err)
5241                 tg3_nvram_unlock(tp);
5242         if (err)
5243                 goto out;
5244
5245         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5246                 write_op(tp, cpu_scratch_base + i, 0);
5247         tw32(cpu_base + CPU_STATE, 0xffffffff);
5248         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5249         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5250                 write_op(tp, (cpu_scratch_base +
5251                               (info->text_base & 0xffff) +
5252                               (i * sizeof(u32))),
5253                          (info->text_data ?
5254                           info->text_data[i] : 0));
5255         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5256                 write_op(tp, (cpu_scratch_base +
5257                               (info->rodata_base & 0xffff) +
5258                               (i * sizeof(u32))),
5259                          (info->rodata_data ?
5260                           info->rodata_data[i] : 0));
5261         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5262                 write_op(tp, (cpu_scratch_base +
5263                               (info->data_base & 0xffff) +
5264                               (i * sizeof(u32))),
5265                          (info->data_data ?
5266                           info->data_data[i] : 0));
5267
5268         err = 0;
5269
5270 out:
5271         return err;
5272 }
5273
5274 /* tp->lock is held. */
5275 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5276 {
5277         struct fw_info info;
5278         int err, i;
5279
5280         info.text_base = TG3_FW_TEXT_ADDR;
5281         info.text_len = TG3_FW_TEXT_LEN;
5282         info.text_data = &tg3FwText[0];
5283         info.rodata_base = TG3_FW_RODATA_ADDR;
5284         info.rodata_len = TG3_FW_RODATA_LEN;
5285         info.rodata_data = &tg3FwRodata[0];
5286         info.data_base = TG3_FW_DATA_ADDR;
5287         info.data_len = TG3_FW_DATA_LEN;
5288         info.data_data = NULL;
5289
5290         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5291                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5292                                     &info);
5293         if (err)
5294                 return err;
5295
5296         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5297                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5298                                     &info);
5299         if (err)
5300                 return err;
5301
5302         /* Now startup only the RX cpu. */
5303         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5304         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5305
5306         for (i = 0; i < 5; i++) {
5307                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5308                         break;
5309                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5310                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5311                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5312                 udelay(1000);
5313         }
5314         if (i >= 5) {
5315                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5316                        "to set RX CPU PC, is %08x should be %08x\n",
5317                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5318                        TG3_FW_TEXT_ADDR);
5319                 return -ENODEV;
5320         }
5321         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5322         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5323
5324         return 0;
5325 }
5326
#if TG3_TSO_SUPPORT != 0

/*
 * Version stamp and on-chip memory layout of the embedded TSO
 * (TCP Segmentation Offload) firmware image.  The ADDR/LEN pairs
 * describe where each section (text/rodata/data/sbss/bss) is placed
 * in the device's internal address space; presumably the image is
 * downloaded via tg3_load_firmware_cpu() like the main firmware —
 * TODO confirm against the loader further down this file.
 *
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a historical
 * typo; the name is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5343
/*
 * Text (instruction) segment of the TSO firmware image, one 32-bit
 * word per array entry.  Derived from unpublished Broadcom sources
 * (see the firmware copyright notice at the top of this file).  The
 * words are opaque machine code: do NOT edit or reorder them.  The
 * array is sized (TG3_TSO_FW_TEXT_LEN / 4) + 1, i.e. one word beyond
 * the section length — presumably deliberate padding; TODO confirm
 * against the download loop.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
5630
/*
 * Read-only data segment of the TSO firmware image.  The words are
 * NUL-padded ASCII strings used by the firmware (e.g. 0x4d61696e
 * 0x43707542 = "MainCpuB", 0x66617461 0x6c457272 = "fatalErr").
 * Opaque firmware data — do not edit.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5638
/*
 * Initialized data segment of the TSO firmware image.  Words two
 * through five spell the version tag "stkoffld_v1.6.0" in ASCII,
 * matching the 1.6.0 release macros above.  Opaque firmware data —
 * do not edit.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5644
/* 5705 needs a special version of the TSO firmware.  */
/*
 * Version stamp and on-chip memory layout of the 5705-specific TSO
 * firmware image; same section scheme (text/rodata/data/sbss/bss) as
 * the TG3_TSO_FW_* macros above, but placed at 0x00010000 instead of
 * 0x08000000.
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR repeats the typo
 * from TG3_TSO_FW_RELASE_MINOR; kept as-is in case it is referenced
 * elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5660
5661 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5662         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5663         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5664         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5665         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5666         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5667         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5668         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5669         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5670         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5671         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5672         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5673         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5674         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5675         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5676         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5677         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5678         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5679         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5680         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5681         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5682         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5683         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5684         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5685         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5686         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5687         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5688         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5689         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5690         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5691         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5692         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5693         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5694         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5695         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5696         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5697         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5698         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5699         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5700         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5701         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5702         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5703         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5704         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5705         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5706         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5707         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5708         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5709         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5710         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5711         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5712         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5713         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5714         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5715         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5716         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5717         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5718         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5719         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5720         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5721         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5722         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5723         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5724         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5725         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5726         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5727         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5728         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5729         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5730         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5731         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5732         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5733         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5734         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5735         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5736         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5737         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5738         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5739         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5740         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5741         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5742         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5743         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5744         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5745         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5746         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5747         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5748         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5749         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5750         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5751         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5752         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5753         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5754         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5755         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5756         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5757         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5758         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5759         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5760         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5761         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5762         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5763         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5764         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5765         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5766         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5767         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5768         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5769         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5770         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5771         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5772         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5773         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5774         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5775         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5776         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5777         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5778         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5779         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5780         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5781         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5782         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5783         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5784         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5785         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5786         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5787         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5788         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5789         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5790         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5791         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5792         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5793         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5794         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5795         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5796         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5797         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5798         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5799         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5800         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5801         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5802         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5803         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5804         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5805         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5806         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5807         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5808         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5809         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5810         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5811         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5812         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5813         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5814         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5815         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5816         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5817         0x00000000, 0x00000000, 0x00000000,
5818 };
5819
/* Read-only data segment of the 5705 TSO firmware image.  The array is
 * sized TG3_TSO5_FW_RODATA_LEN/4 words plus one extra zero word.  The
 * bytes decode as ASCII tags (e.g. "MainCpuB", "stkoffld", "fatalErr")
 * used by the firmware itself.  Do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
5826
/* Initialized-data segment of the 5705 TSO firmware image, sized
 * TG3_TSO5_FW_DATA_LEN/4 words plus one extra zero word.  The bytes
 * decode as the ASCII version tag "stkoffld_v1.2.0".  Do not edit by
 * hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
5831
5832 /* tp->lock is held. */
5833 static int tg3_load_tso_firmware(struct tg3 *tp)
5834 {
5835         struct fw_info info;
5836         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5837         int err, i;
5838
5839         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5840                 return 0;
5841
5842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5843                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5844                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5845                 info.text_data = &tg3Tso5FwText[0];
5846                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5847                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5848                 info.rodata_data = &tg3Tso5FwRodata[0];
5849                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5850                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5851                 info.data_data = &tg3Tso5FwData[0];
5852                 cpu_base = RX_CPU_BASE;
5853                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5854                 cpu_scratch_size = (info.text_len +
5855                                     info.rodata_len +
5856                                     info.data_len +
5857                                     TG3_TSO5_FW_SBSS_LEN +
5858                                     TG3_TSO5_FW_BSS_LEN);
5859         } else {
5860                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5861                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5862                 info.text_data = &tg3TsoFwText[0];
5863                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5864                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5865                 info.rodata_data = &tg3TsoFwRodata[0];
5866                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5867                 info.data_len = TG3_TSO_FW_DATA_LEN;
5868                 info.data_data = &tg3TsoFwData[0];
5869                 cpu_base = TX_CPU_BASE;
5870                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5871                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5872         }
5873
5874         err = tg3_load_firmware_cpu(tp, cpu_base,
5875                                     cpu_scratch_base, cpu_scratch_size,
5876                                     &info);
5877         if (err)
5878                 return err;
5879
5880         /* Now startup the cpu. */
5881         tw32(cpu_base + CPU_STATE, 0xffffffff);
5882         tw32_f(cpu_base + CPU_PC,    info.text_base);
5883
5884         for (i = 0; i < 5; i++) {
5885                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5886                         break;
5887                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5888                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5889                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5890                 udelay(1000);
5891         }
5892         if (i >= 5) {
5893                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5894                        "to set CPU PC, is %08x should be %08x\n",
5895                        tp->dev->name, tr32(cpu_base + CPU_PC),
5896                        info.text_base);
5897                 return -ENODEV;
5898         }
5899         tw32(cpu_base + CPU_STATE, 0xffffffff);
5900         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5901         return 0;
5902 }
5903
5904 #endif /* TG3_TSO_SUPPORT != 0 */
5905
5906 /* tp->lock is held. */
5907 static void __tg3_set_mac_addr(struct tg3 *tp)
5908 {
5909         u32 addr_high, addr_low;
5910         int i;
5911
5912         addr_high = ((tp->dev->dev_addr[0] << 8) |
5913                      tp->dev->dev_addr[1]);
5914         addr_low = ((tp->dev->dev_addr[2] << 24) |
5915                     (tp->dev->dev_addr[3] << 16) |
5916                     (tp->dev->dev_addr[4] <<  8) |
5917                     (tp->dev->dev_addr[5] <<  0));
5918         for (i = 0; i < 4; i++) {
5919                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5920                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5921         }
5922
5923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5925                 for (i = 0; i < 12; i++) {
5926                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5927                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5928                 }
5929         }
5930
5931         addr_high = (tp->dev->dev_addr[0] +
5932                      tp->dev->dev_addr[1] +
5933                      tp->dev->dev_addr[2] +
5934                      tp->dev->dev_addr[3] +
5935                      tp->dev->dev_addr[4] +
5936                      tp->dev->dev_addr[5]) &
5937                 TX_BACKOFF_SEED_MASK;
5938         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5939 }
5940
5941 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5942 {
5943         struct tg3 *tp = netdev_priv(dev);
5944         struct sockaddr *addr = p;
5945         int err = 0;
5946
5947         if (!is_valid_ether_addr(addr->sa_data))
5948                 return -EINVAL;
5949
5950         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5951
5952         if (!netif_running(dev))
5953                 return 0;
5954
5955         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5956                 /* Reset chip so that ASF can re-init any MAC addresses it
5957                  * needs.
5958                  */
5959                 tg3_netif_stop(tp);
5960                 tg3_full_lock(tp, 1);
5961
5962                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5963                 err = tg3_restart_hw(tp, 0);
5964                 if (!err)
5965                         tg3_netif_start(tp);
5966                 tg3_full_unlock(tp);
5967         } else {
5968                 spin_lock_bh(&tp->lock);
5969                 __tg3_set_mac_addr(tp);
5970                 spin_unlock_bh(&tp->lock);
5971         }
5972
5973         return err;
5974 }
5975
5976 /* tp->lock is held. */
5977 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5978                            dma_addr_t mapping, u32 maxlen_flags,
5979                            u32 nic_addr)
5980 {
5981         tg3_write_mem(tp,
5982                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5983                       ((u64) mapping >> 32));
5984         tg3_write_mem(tp,
5985                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5986                       ((u64) mapping & 0xffffffff));
5987         tg3_write_mem(tp,
5988                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5989                        maxlen_flags);
5990
5991         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5992                 tg3_write_mem(tp,
5993                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5994                               nic_addr);
5995 }
5996
5997 static void __tg3_set_rx_mode(struct net_device *);
5998 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5999 {
6000         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6001         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6002         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6003         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6004         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6005                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6006                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6007         }
6008         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6009         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6010         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6011                 u32 val = ec->stats_block_coalesce_usecs;
6012
6013                 if (!netif_carrier_ok(tp->dev))
6014                         val = 0;
6015
6016                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6017         }
6018 }
6019
6020 /* tp->lock is held. */
6021 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6022 {
6023         u32 val, rdmac_mode;
6024         int i, err, limit;
6025
6026         tg3_disable_ints(tp);
6027
6028         tg3_stop_fw(tp);
6029
6030         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6031
6032         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6033                 tg3_abort_hw(tp, 1);
6034         }
6035
6036         if (reset_phy)
6037                 tg3_phy_reset(tp);
6038
6039         err = tg3_chip_reset(tp);
6040         if (err)
6041                 return err;
6042
6043         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6044
6045         /* This works around an issue with Athlon chipsets on
6046          * B3 tigon3 silicon.  This bit has no effect on any
6047          * other revision.  But do not set this on PCI Express
6048          * chips.
6049          */
6050         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6051                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6052         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6053
6054         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6055             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6056                 val = tr32(TG3PCI_PCISTATE);
6057                 val |= PCISTATE_RETRY_SAME_DMA;
6058                 tw32(TG3PCI_PCISTATE, val);
6059         }
6060
6061         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6062                 /* Enable some hw fixes.  */
6063                 val = tr32(TG3PCI_MSI_DATA);
6064                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6065                 tw32(TG3PCI_MSI_DATA, val);
6066         }
6067
6068         /* Descriptor ring init may make accesses to the
6069          * NIC SRAM area to setup the TX descriptors, so we
6070          * can only do this after the hardware has been
6071          * successfully reset.
6072          */
6073         err = tg3_init_rings(tp);
6074         if (err)
6075                 return err;
6076
6077         /* This value is determined during the probe time DMA
6078          * engine test, tg3_test_dma.
6079          */
6080         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6081
6082         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6083                           GRC_MODE_4X_NIC_SEND_RINGS |
6084                           GRC_MODE_NO_TX_PHDR_CSUM |
6085                           GRC_MODE_NO_RX_PHDR_CSUM);
6086         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6087
6088         /* Pseudo-header checksum is done by hardware logic and not
6089          * the offload processers, so make the chip do the pseudo-
6090          * header checksums on receive.  For transmit it is more
6091          * convenient to do the pseudo-header checksum in software
6092          * as Linux does that on transmit for us in all cases.
6093          */
6094         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6095
6096         tw32(GRC_MODE,
6097              tp->grc_mode |
6098              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6099
6100         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6101         val = tr32(GRC_MISC_CFG);
6102         val &= ~0xff;
6103         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6104         tw32(GRC_MISC_CFG, val);
6105
6106         /* Initialize MBUF/DESC pool. */
6107         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6108                 /* Do nothing.  */
6109         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6110                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6112                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6113                 else
6114                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6115                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6116                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6117         }
6118 #if TG3_TSO_SUPPORT != 0
6119         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6120                 int fw_len;
6121
6122                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6123                           TG3_TSO5_FW_RODATA_LEN +
6124                           TG3_TSO5_FW_DATA_LEN +
6125                           TG3_TSO5_FW_SBSS_LEN +
6126                           TG3_TSO5_FW_BSS_LEN);
6127                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6128                 tw32(BUFMGR_MB_POOL_ADDR,
6129                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6130                 tw32(BUFMGR_MB_POOL_SIZE,
6131                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6132         }
6133 #endif
6134
6135         if (tp->dev->mtu <= ETH_DATA_LEN) {
6136                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6137                      tp->bufmgr_config.mbuf_read_dma_low_water);
6138                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6139                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6140                 tw32(BUFMGR_MB_HIGH_WATER,
6141                      tp->bufmgr_config.mbuf_high_water);
6142         } else {
6143                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6144                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6145                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6146                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6147                 tw32(BUFMGR_MB_HIGH_WATER,
6148                      tp->bufmgr_config.mbuf_high_water_jumbo);
6149         }
6150         tw32(BUFMGR_DMA_LOW_WATER,
6151              tp->bufmgr_config.dma_low_water);
6152         tw32(BUFMGR_DMA_HIGH_WATER,
6153              tp->bufmgr_config.dma_high_water);
6154
6155         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6156         for (i = 0; i < 2000; i++) {
6157                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6158                         break;
6159                 udelay(10);
6160         }
6161         if (i >= 2000) {
6162                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6163                        tp->dev->name);
6164                 return -ENODEV;
6165         }
6166
6167         /* Setup replenish threshold. */
6168         val = tp->rx_pending / 8;
6169         if (val == 0)
6170                 val = 1;
6171         else if (val > tp->rx_std_max_post)
6172                 val = tp->rx_std_max_post;
6173         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6174                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6175                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6176
6177                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6178                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6179         }
6180
6181         tw32(RCVBDI_STD_THRESH, val);
6182
6183         /* Initialize TG3_BDINFO's at:
6184          *  RCVDBDI_STD_BD:     standard eth size rx ring
6185          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6186          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6187          *
6188          * like so:
6189          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6190          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6191          *                              ring attribute flags
6192          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6193          *
6194          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6195          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6196          *
6197          * The size of each ring is fixed in the firmware, but the location is
6198          * configurable.
6199          */
6200         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6201              ((u64) tp->rx_std_mapping >> 32));
6202         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6203              ((u64) tp->rx_std_mapping & 0xffffffff));
6204         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6205              NIC_SRAM_RX_BUFFER_DESC);
6206
6207         /* Don't even try to program the JUMBO/MINI buffer descriptor
6208          * configs on 5705.
6209          */
6210         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6211                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6212                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6213         } else {
6214                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6215                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6216
6217                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6218                      BDINFO_FLAGS_DISABLED);
6219
6220                 /* Setup replenish threshold. */
6221                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6222
6223                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6224                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6225                              ((u64) tp->rx_jumbo_mapping >> 32));
6226                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6227                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6228                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6229                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6231                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6232                 } else {
6233                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6234                              BDINFO_FLAGS_DISABLED);
6235                 }
6236
6237         }
6238
6239         /* There is only one send ring on 5705/5750, no need to explicitly
6240          * disable the others.
6241          */
6242         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6243                 /* Clear out send RCB ring in SRAM. */
6244                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6245                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6246                                       BDINFO_FLAGS_DISABLED);
6247         }
6248
6249         tp->tx_prod = 0;
6250         tp->tx_cons = 0;
6251         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6252         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6253
6254         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6255                        tp->tx_desc_mapping,
6256                        (TG3_TX_RING_SIZE <<
6257                         BDINFO_FLAGS_MAXLEN_SHIFT),
6258                        NIC_SRAM_TX_BUFFER_DESC);
6259
6260         /* There is only one receive return ring on 5705/5750, no need
6261          * to explicitly disable the others.
6262          */
6263         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6264                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6265                      i += TG3_BDINFO_SIZE) {
6266                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6267                                       BDINFO_FLAGS_DISABLED);
6268                 }
6269         }
6270
6271         tp->rx_rcb_ptr = 0;
6272         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6273
6274         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6275                        tp->rx_rcb_mapping,
6276                        (TG3_RX_RCB_RING_SIZE(tp) <<
6277                         BDINFO_FLAGS_MAXLEN_SHIFT),
6278                        0);
6279
6280         tp->rx_std_ptr = tp->rx_pending;
6281         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6282                      tp->rx_std_ptr);
6283
6284         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6285                                                 tp->rx_jumbo_pending : 0;
6286         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6287                      tp->rx_jumbo_ptr);
6288
6289         /* Initialize MAC address and backoff seed. */
6290         __tg3_set_mac_addr(tp);
6291
6292         /* MTU + ethernet header + FCS + optional VLAN tag */
6293         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6294
6295         /* The slot time is changed by tg3_setup_phy if we
6296          * run at gigabit with half duplex.
6297          */
6298         tw32(MAC_TX_LENGTHS,
6299              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6300              (6 << TX_LENGTHS_IPG_SHIFT) |
6301              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6302
6303         /* Receive rules. */
6304         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6305         tw32(RCVLPC_CONFIG, 0x0181);
6306
6307         /* Calculate RDMAC_MODE setting early, we need it to determine
6308          * the RCVLPC_STATE_ENABLE mask.
6309          */
6310         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6311                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6312                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6313                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6314                       RDMAC_MODE_LNGREAD_ENAB);
6315         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6316                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6317
6318         /* If statement applies to 5705 and 5750 PCI devices only */
6319         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6320              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6321             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6322                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6323                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6324                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6325                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6326                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6327                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6328                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6329                 }
6330         }
6331
6332         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6333                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6334
6335 #if TG3_TSO_SUPPORT != 0
6336         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6337                 rdmac_mode |= (1 << 27);
6338 #endif
6339
6340         /* Receive/send statistics. */
6341         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6342                 val = tr32(RCVLPC_STATS_ENABLE);
6343                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6344                 tw32(RCVLPC_STATS_ENABLE, val);
6345         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6346                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6347                 val = tr32(RCVLPC_STATS_ENABLE);
6348                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6349                 tw32(RCVLPC_STATS_ENABLE, val);
6350         } else {
6351                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6352         }
6353         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6354         tw32(SNDDATAI_STATSENAB, 0xffffff);
6355         tw32(SNDDATAI_STATSCTRL,
6356              (SNDDATAI_SCTRL_ENABLE |
6357               SNDDATAI_SCTRL_FASTUPD));
6358
6359         /* Setup host coalescing engine. */
6360         tw32(HOSTCC_MODE, 0);
6361         for (i = 0; i < 2000; i++) {
6362                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6363                         break;
6364                 udelay(10);
6365         }
6366
6367         __tg3_set_coalesce(tp, &tp->coal);
6368
6369         /* set status block DMA address */
6370         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6371              ((u64) tp->status_mapping >> 32));
6372         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6373              ((u64) tp->status_mapping & 0xffffffff));
6374
6375         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6376                 /* Status/statistics block address.  See tg3_timer,
6377                  * the tg3_periodic_fetch_stats call there, and
6378                  * tg3_get_stats to see how this works for 5705/5750 chips.
6379                  */
6380                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6381                      ((u64) tp->stats_mapping >> 32));
6382                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6383                      ((u64) tp->stats_mapping & 0xffffffff));
6384                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6385                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6386         }
6387
6388         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6389
6390         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6391         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6392         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6393                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6394
6395         /* Clear statistics/status block in chip, and status block in ram. */
6396         for (i = NIC_SRAM_STATS_BLK;
6397              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6398              i += sizeof(u32)) {
6399                 tg3_write_mem(tp, i, 0);
6400                 udelay(40);
6401         }
6402         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6403
6404         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6405                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6406                 /* reset to prevent losing 1st rx packet intermittently */
6407                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6408                 udelay(10);
6409         }
6410
6411         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6412                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6413         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6414         udelay(40);
6415
6416         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6417          * If TG3_FLG2_IS_NIC is zero, we should read the
6418          * register to preserve the GPIO settings for LOMs. The GPIOs,
6419          * whether used as inputs or outputs, are set by boot code after
6420          * reset.
6421          */
6422         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6423                 u32 gpio_mask;
6424
6425                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6426                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6427                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6428
6429                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6430                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6431                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6432
6433                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6434                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6435
6436                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6437
6438                 /* GPIO1 must be driven high for eeprom write protect */
6439                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6440                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6441                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6442         }
6443         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6444         udelay(100);
6445
6446         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6447         tp->last_tag = 0;
6448
6449         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6450                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6451                 udelay(40);
6452         }
6453
6454         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6455                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6456                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6457                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6458                WDMAC_MODE_LNGREAD_ENAB);
6459
6460         /* If statement applies to 5705 and 5750 PCI devices only */
6461         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6462              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6463             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6464                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6465                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6466                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6467                         /* nothing */
6468                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6469                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6470                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6471                         val |= WDMAC_MODE_RX_ACCEL;
6472                 }
6473         }
6474
6475         /* Enable host coalescing bug fix */
6476         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6477             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6478                 val |= (1 << 29);
6479
6480         tw32_f(WDMAC_MODE, val);
6481         udelay(40);
6482
6483         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6484                 val = tr32(TG3PCI_X_CAPS);
6485                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6486                         val &= ~PCIX_CAPS_BURST_MASK;
6487                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6488                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6489                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6490                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6491                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6492                                 val |= (tp->split_mode_max_reqs <<
6493                                         PCIX_CAPS_SPLIT_SHIFT);
6494                 }
6495                 tw32(TG3PCI_X_CAPS, val);
6496         }
6497
6498         tw32_f(RDMAC_MODE, rdmac_mode);
6499         udelay(40);
6500
6501         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6502         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6503                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6504         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6505         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6506         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6507         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6508         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6509 #if TG3_TSO_SUPPORT != 0
6510         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6511                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6512 #endif
6513         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6514         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6515
6516         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6517                 err = tg3_load_5701_a0_firmware_fix(tp);
6518                 if (err)
6519                         return err;
6520         }
6521
6522 #if TG3_TSO_SUPPORT != 0
6523         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6524                 err = tg3_load_tso_firmware(tp);
6525                 if (err)
6526                         return err;
6527         }
6528 #endif
6529
6530         tp->tx_mode = TX_MODE_ENABLE;
6531         tw32_f(MAC_TX_MODE, tp->tx_mode);
6532         udelay(100);
6533
6534         tp->rx_mode = RX_MODE_ENABLE;
6535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6536                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6537
6538         tw32_f(MAC_RX_MODE, tp->rx_mode);
6539         udelay(10);
6540
6541         if (tp->link_config.phy_is_low_power) {
6542                 tp->link_config.phy_is_low_power = 0;
6543                 tp->link_config.speed = tp->link_config.orig_speed;
6544                 tp->link_config.duplex = tp->link_config.orig_duplex;
6545                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6546         }
6547
6548         tp->mi_mode = MAC_MI_MODE_BASE;
6549         tw32_f(MAC_MI_MODE, tp->mi_mode);
6550         udelay(80);
6551
6552         tw32(MAC_LED_CTRL, tp->led_ctrl);
6553
6554         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6555         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6556                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6557                 udelay(10);
6558         }
6559         tw32_f(MAC_RX_MODE, tp->rx_mode);
6560         udelay(10);
6561
6562         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6563                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6564                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6565                         /* Set drive transmission level to 1.2V  */
6566                         /* only if the signal pre-emphasis bit is not set  */
6567                         val = tr32(MAC_SERDES_CFG);
6568                         val &= 0xfffff000;
6569                         val |= 0x880;
6570                         tw32(MAC_SERDES_CFG, val);
6571                 }
6572                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6573                         tw32(MAC_SERDES_CFG, 0x616000);
6574         }
6575
6576         /* Prevent chip from dropping frames when flow control
6577          * is enabled.
6578          */
6579         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6580
6581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6582             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6583                 /* Use hardware link auto-negotiation */
6584                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6585         }
6586
6587         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6588             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6589                 u32 tmp;
6590
6591                 tmp = tr32(SERDES_RX_CTRL);
6592                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6593                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6594                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6595                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6596         }
6597
6598         err = tg3_setup_phy(tp, 0);
6599         if (err)
6600                 return err;
6601
6602         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6603             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6604                 u32 tmp;
6605
6606                 /* Clear CRC stats. */
6607                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6608                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6609                         tg3_readphy(tp, 0x14, &tmp);
6610                 }
6611         }
6612
6613         __tg3_set_rx_mode(tp->dev);
6614
6615         /* Initialize receive rules. */
6616         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6617         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6618         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6619         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6620
6621         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6622             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6623                 limit = 8;
6624         else
6625                 limit = 16;
6626         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6627                 limit -= 4;
6628         switch (limit) {
6629         case 16:
6630                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6631         case 15:
6632                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6633         case 14:
6634                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6635         case 13:
6636                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6637         case 12:
6638                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6639         case 11:
6640                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6641         case 10:
6642                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6643         case 9:
6644                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6645         case 8:
6646                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6647         case 7:
6648                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6649         case 6:
6650                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6651         case 5:
6652                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6653         case 4:
6654                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6655         case 3:
6656                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6657         case 2:
6658         case 1:
6659
6660         default:
6661                 break;
6662         };
6663
6664         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6665
6666         return 0;
6667 }
6668
6669 /* Called at device open time to get the chip ready for
6670  * packet processing.  Invoked with tp->lock held.
6671  */
6672 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6673 {
6674         int err;
6675
6676         /* Force the chip into D0. */
6677         err = tg3_set_power_state(tp, PCI_D0);
6678         if (err)
6679                 goto out;
6680
6681         tg3_switch_clocks(tp);
6682
6683         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6684
6685         err = tg3_reset_hw(tp, reset_phy);
6686
6687 out:
6688         return err;
6689 }
6690
/* Add the 32-bit value read from hardware register REG into the 64-bit
 * statistics counter PSTAT, kept as a {high,low} pair.  A carry into
 * ->high is detected by unsigned wrap-around: after the addition,
 * ->low < __val iff the 32-bit sum overflowed.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6697
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called once per
 * second from tg3_timer() on 5705+ chips (see the comment near the
 * HOSTCC_STATS_BLK setup in tg3_reset_hw, and tg3_get_stats, for how
 * statistics work on 5705/5750).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Nothing is accumulated while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6738
/* Per-device timer callback.  Re-arms itself every tp->timer_offset
 * jiffies (configured in tg3_open: HZ/10 normally, HZ with tagged
 * status).  On each tick it runs the non-tagged interrupt race
 * workaround; once per second it polls link state and fetches
 * statistics; once every two seconds it sends the ASF heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all work while interrupts are being synchronized, but
	 * keep the timer itself running so it resumes afterwards.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Pending status update: force an interrupt so
			 * it gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick the coalescing engine now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine has shut itself off, schedule
		 * a full chip reset from process context and bail out.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for link/PHY change events. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but link state changed, or carrier
			 * down with PCS sync / signal detect: re-run PHY
			 * setup.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Briefly clear the port mode
					 * bits before restoring them.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* NOTE(review): bit 14 of GRC_RX_CPU_EVENT
			 * appears to signal the firmware mailbox --
			 * confirm against chip documentation.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6858
6859 static int tg3_request_irq(struct tg3 *tp)
6860 {
6861         irq_handler_t fn;
6862         unsigned long flags;
6863         struct net_device *dev = tp->dev;
6864
6865         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6866                 fn = tg3_msi;
6867                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6868                         fn = tg3_msi_1shot;
6869                 flags = IRQF_SAMPLE_RANDOM;
6870         } else {
6871                 fn = tg3_interrupt;
6872                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6873                         fn = tg3_interrupt_tagged;
6874                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6875         }
6876         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6877 }
6878
6879 static int tg3_test_interrupt(struct tg3 *tp)
6880 {
6881         struct net_device *dev = tp->dev;
6882         int err, i, intr_ok = 0;
6883
6884         if (!netif_running(dev))
6885                 return -ENODEV;
6886
6887         tg3_disable_ints(tp);
6888
6889         free_irq(tp->pdev->irq, dev);
6890
6891         err = request_irq(tp->pdev->irq, tg3_test_isr,
6892                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6893         if (err)
6894                 return err;
6895
6896         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6897         tg3_enable_ints(tp);
6898
6899         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6900                HOSTCC_MODE_NOW);
6901
6902         for (i = 0; i < 5; i++) {
6903                 u32 int_mbox, misc_host_ctrl;
6904
6905                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6906                                         TG3_64BIT_REG_LOW);
6907                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
6908
6909                 if ((int_mbox != 0) ||
6910                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
6911                         intr_ok = 1;
6912                         break;
6913                 }
6914
6915                 msleep(10);
6916         }
6917
6918         tg3_disable_ints(tp);
6919
6920         free_irq(tp->pdev->irq, dev);
6921
6922         err = tg3_request_irq(tp);
6923
6924         if (err)
6925                 return err;
6926
6927         if (intr_ok)
6928                 return 0;
6929
6930         return -EIO;
6931 }
6932
/* Verify that MSI delivery works on this platform.  Returns 0 if the
 * MSI test succeeds, or if it fails and INTx mode is successfully
 * restored instead; otherwise returns a negative errno.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-install the legacy INTx handler. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If the re-init failed the caller cannot use the IRQ either. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6993
/* net_device open method: power the chip up, allocate the DMA rings,
 * choose MSI vs INTx and install the IRQ handler, program the
 * hardware, verify MSI delivery, and start the periodic timer.
 * Returns 0 on success or a negative errno with all resources
 * released.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Bring the chip to full power before touching registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	/* Try MSI only on 5750+ parts, excluding the 5750 A/B
	 * steppings and the 5714 case where pdev_peer == pdev
	 * (NOTE(review): that comparison appears to distinguish
	 * single-port from dual-port 5714 boards -- confirm).
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
	    !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
	      (tp->pdev_peer == tp->pdev))) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Unwind MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Timer rate: once per second with tagged status, ten
		 * times per second otherwise (the faster rate drives
		 * the interrupt race workaround in tg3_timer).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_multiplier ticks span one second; the ASF
		 * heartbeat period is two seconds.
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI delivery; tg3_test_msi falls back to INTx
		 * on its own if MSI does not work.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			/* Enable one-shot MSI mode on chips that
			 * support it.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	/* Everything is ready: arm the timer and enable interrupts. */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7128
#if 0
/* Debug-only helper (compiled out): dump the chip's major control-block
 * mode/status registers, host coalescing and buffer manager state, the
 * BDINFO structures, on-chip SRAM ring control blocks, the host status
 * and statistics blocks, and the first few NIC-side TX/RX descriptors.
 * Output goes to the kernel log via printk.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        /* 64-bit DMA addresses are kept as HIGH/LOW 32-bit register pairs. */
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs: each BDINFO is four consecutive 32-bit registers. */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        /* On-chip SRAM ring control blocks, read via tg3_read_mem(). */
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block: dump only the first four 32-bit words. */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors, read directly through the mapped
         * register window (tp->regs + NIC_SRAM_WIN_BASE).
         */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
7356
7357 static struct net_device_stats *tg3_get_stats(struct net_device *);
7358 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7359
/* net_device close (ndo_stop) handler: quiesce the chip, release the
 * interrupt and DMA resources allocated at open time, snapshot the
 * hardware counters into the *_prev accumulators (so stats survive an
 * open/close cycle), and put the device into D3hot.
 * The teardown order here is deliberate -- do not reorder.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
                msleep(1);

        netif_stop_queue(dev);

        /* Timer must be dead before we halt the chip under the full lock. */
        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &=
                ~(TG3_FLAG_INIT_COMPLETE |
                  TG3_FLAG_GOT_SERDES_FLOWCTL);

        tg3_full_unlock(tp);

        /* free_irq() can sleep, so it must happen outside the full lock. */
        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Fold the final hardware counters into the saved totals before
         * the stats memory is freed below.
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
7409
7410 static inline unsigned long get_stat64(tg3_stat64_t *val)
7411 {
7412         unsigned long ret;
7413
7414 #if (BITS_PER_LONG == 32)
7415         ret = val->low;
7416 #else
7417         ret = ((u64)val->high << 32) | ((u64)val->low);
7418 #endif
7419         return ret;
7420 }
7421
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper devices the MAC's rx_fcs_errors counter is not
 * used; instead the error count is read from the PHY (registers 0x1e
 * and 0x14 -- presumably a PHY CRC error counter; TODO confirm against
 * the PHY datasheet) and accumulated into tp->phy_crc_errors.
 * All other devices use the hardware statistics block directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                /* PHY access requires tp->lock; BH-disabled because the
                 * lock is also taken from softirq context.
                 */
                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, 0x1e, &val)) {
                        tg3_writephy(tp, 0x1e, val | 0x8000);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
7446
/* Accumulate one ethtool stat: saved total from before the last
 * open/close cycle plus the live hardware counter.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Refresh tp->estats from the hardware statistics block and return it.
 * If the stats DMA block is not allocated (device closed), return the
 * values saved at the last close instead.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
7538
/* net_device get_stats handler: build tp->net_stats by mapping the
 * chip's hardware statistics block onto the generic netdev counters,
 * adding in the totals saved at the last close.  If the stats block is
 * not allocated (device closed), return the saved totals unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        /* The hardware keeps separate ucast/mcast/bcast counters. */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on some chips; see
         * calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
7598
7599 static inline u32 calc_crc(unsigned char *buf, int len)
7600 {
7601         u32 reg;
7602         u32 tmp;
7603         int j, k;
7604
7605         reg = 0xffffffff;
7606
7607         for (j = 0; j < len; j++) {
7608                 reg ^= buf[j];
7609
7610                 for (k = 0; k < 8; k++) {
7611                         tmp = reg & 0x01;
7612
7613                         reg >>= 1;
7614
7615                         if (tmp) {
7616                                 reg ^= 0xedb88320;
7617                         }
7618                 }
7619         }
7620
7621         return ~reg;
7622 }
7623
7624 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7625 {
7626         /* accept or reject all multicast frames */
7627         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7628         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7629         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7630         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7631 }
7632
/* Apply the netdev's RX filtering flags (promiscuous / allmulti /
 * multicast list) to the chip's RX mode and MAC hash registers.
 * Caller must hold the full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                /* Hash each multicast address into one of 128 filter
                 * bits: the top 7 bits of the inverted CRC select a
                 * register (bits 6:5) and a bit within it (bits 4:0).
                 */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register if something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
7696
/* net_device set_rx_mode entry point: take the full lock and apply the
 * current RX filtering flags.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
7708
/* Size of the ethtool register dump buffer (fixed 32KB window). */
#define TG3_REGDUMP_LEN         (32 * 1024)

/* ethtool get_regs_len handler: the dump size is constant. */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REGDUMP_LEN;
}
7715
/* ethtool get_regs handler: copy selected chip register ranges into the
 * caller's TG3_REGDUMP_LEN buffer, placed at their register offsets so
 * the dump mirrors the chip's address map (unread gaps stay zero).
 * Skipped entirely while the PHY is in low power (registers unreadable).
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* Read one register into the buffer, advancing the cursor. */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
/* Reposition the cursor to the range's offset, then read len bytes. */
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
/* Reposition and read a single register. */
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        /* NVRAM registers are only meaningful when NVRAM is present. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
7788
/* ethtool get_eeprom_len handler: size of the NVRAM as probed at init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
7795
7796 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7797 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7798
/* ethtool get_eeprom handler: read an arbitrary (offset, len) span of
 * NVRAM into 'data'.  NVRAM can only be read as aligned 32-bit words,
 * so the span is handled as up to three parts: a leading partial word,
 * whole aligned words, and a trailing partial word.  eeprom->len is
 * updated as bytes are produced, so a mid-span failure still reports
 * how much was read.  Returns 0 or a negative errno from the NVRAM read.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, val, b_offset, b_count;

        /* NVRAM is not reachable while the PHY is in low power state. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                /* Fix byte order so the memcpy below extracts the
                 * requested bytes regardless of host endianness.
                 */
                val = cpu_to_le32(val);
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes upto the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read(tp, offset + i, &val);
                if (ret) {
                        eeprom->len += i;
                        return ret;
                }
                val = cpu_to_le32(val);
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read(tp, b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(pd, ((char*)&val), b_count);
                eeprom->len += b_count;
        }
        return 0;
}
7860
7861 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7862
7863 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7864 {
7865         struct tg3 *tp = netdev_priv(dev);
7866         int ret;
7867         u32 offset, len, b_offset, odd_len, start, end;
7868         u8 *buf;
7869
7870         if (tp->link_config.phy_is_low_power)
7871                 return -EAGAIN;
7872
7873         if (eeprom->magic != TG3_EEPROM_MAGIC)
7874                 return -EINVAL;
7875
7876         offset = eeprom->offset;
7877         len = eeprom->len;
7878
7879         if ((b_offset = (offset & 3))) {
7880                 /* adjustments to start on required 4 byte boundary */
7881                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7882                 if (ret)
7883                         return ret;
7884                 start = cpu_to_le32(start);
7885                 len += b_offset;
7886                 offset &= ~3;
7887                 if (len < 4)
7888                         len = 4;
7889         }
7890
7891         odd_len = 0;
7892         if (len & 3) {
7893                 /* adjustments to end on required 4 byte boundary */
7894                 odd_len = 1;
7895                 len = (len + 3) & ~3;
7896                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7897                 if (ret)
7898                         return ret;
7899                 end = cpu_to_le32(end);
7900         }
7901
7902         buf = data;
7903         if (b_offset || odd_len) {
7904                 buf = kmalloc(len, GFP_KERNEL);
7905                 if (buf == 0)
7906                         return -ENOMEM;
7907                 if (b_offset)
7908                         memcpy(buf, &start, 4);
7909                 if (odd_len)
7910                         memcpy(buf+len-4, &end, 4);
7911                 memcpy(buf + b_offset, data, eeprom->len);
7912         }
7913
7914         ret = tg3_nvram_write_block(tp, offset, len, buf);
7915
7916         if (buf != data)
7917                 kfree(buf);
7918
7919         return ret;
7920 }
7921
/* ethtool ->get_settings handler: report supported link modes, current
 * advertisement, port type, and (when the interface is up) the active
 * speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes are offered unless the part is 10/100-only.
	 * Note TG3_FLAG_10_100_ONLY is a tg3_flags (not tg3_flags2) bit.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Serdes devices are fibre-only; copper parts add 10/100 and MII. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* active_speed/active_duplex are only meaningful while running. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7956
7957 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7958 {
7959         struct tg3 *tp = netdev_priv(dev);
7960
7961         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7962                 /* These are the only valid advertisement bits allowed.  */
7963                 if (cmd->autoneg == AUTONEG_ENABLE &&
7964                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7965                                           ADVERTISED_1000baseT_Full |
7966                                           ADVERTISED_Autoneg |
7967                                           ADVERTISED_FIBRE)))
7968                         return -EINVAL;
7969                 /* Fiber can only do SPEED_1000.  */
7970                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7971                          (cmd->speed != SPEED_1000))
7972                         return -EINVAL;
7973         /* Copper cannot force SPEED_1000.  */
7974         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7975                    (cmd->speed == SPEED_1000))
7976                 return -EINVAL;
7977         else if ((cmd->speed == SPEED_1000) &&
7978                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7979                 return -EINVAL;
7980
7981         tg3_full_lock(tp, 0);
7982
7983         tp->link_config.autoneg = cmd->autoneg;
7984         if (cmd->autoneg == AUTONEG_ENABLE) {
7985                 tp->link_config.advertising = cmd->advertising;
7986                 tp->link_config.speed = SPEED_INVALID;
7987                 tp->link_config.duplex = DUPLEX_INVALID;
7988         } else {
7989                 tp->link_config.advertising = 0;
7990                 tp->link_config.speed = cmd->speed;
7991                 tp->link_config.duplex = cmd->duplex;
7992         }
7993
7994         tp->link_config.orig_speed = tp->link_config.speed;
7995         tp->link_config.orig_duplex = tp->link_config.duplex;
7996         tp->link_config.orig_autoneg = tp->link_config.autoneg;
7997
7998         if (netif_running(dev))
7999                 tg3_setup_phy(tp, 1);
8000
8001         tg3_full_unlock(tp);
8002
8003         return 0;
8004 }
8005
8006 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8007 {
8008         struct tg3 *tp = netdev_priv(dev);
8009
8010         strcpy(info->driver, DRV_MODULE_NAME);
8011         strcpy(info->version, DRV_MODULE_VERSION);
8012         strcpy(info->fw_version, tp->fw_ver);
8013         strcpy(info->bus_info, pci_name(tp->pdev));
8014 }
8015
8016 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8017 {
8018         struct tg3 *tp = netdev_priv(dev);
8019
8020         wol->supported = WAKE_MAGIC;
8021         wol->wolopts = 0;
8022         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8023                 wol->wolopts = WAKE_MAGIC;
8024         memset(&wol->sopass, 0, sizeof(wol->sopass));
8025 }
8026
8027 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8028 {
8029         struct tg3 *tp = netdev_priv(dev);
8030
8031         if (wol->wolopts & ~WAKE_MAGIC)
8032                 return -EINVAL;
8033         if ((wol->wolopts & WAKE_MAGIC) &&
8034             tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
8035             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
8036                 return -EINVAL;
8037
8038         spin_lock_bh(&tp->lock);
8039         if (wol->wolopts & WAKE_MAGIC)
8040                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8041         else
8042                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8043         spin_unlock_bh(&tp->lock);
8044
8045         return 0;
8046 }
8047
8048 static u32 tg3_get_msglevel(struct net_device *dev)
8049 {
8050         struct tg3 *tp = netdev_priv(dev);
8051         return tp->msg_enable;
8052 }
8053
8054 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8055 {
8056         struct tg3 *tp = netdev_priv(dev);
8057         tp->msg_enable = value;
8058 }
8059
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso handler: toggle TCP segmentation offload.  On chips
 * that are not TSO-capable only "off" is accepted.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	/* HW_TSO_2 parts other than the 5906 also toggle IPv6 TSO, keeping
	 * NETIF_F_TSO6 in step with the generic TSO feature bit.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
8080
/* ethtool ->nway_reset handler: restart autonegotiation on the copper PHY.
 * Returns 0 on success, -EAGAIN if the interface is down, -EINVAL for
 * serdes links or when autoneg is disabled (unless parallel detect is in
 * effect).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Serdes links do not negotiate through the MII BMCR. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice back-to-back; the first result is
	 * discarded, which looks like a deliberate dummy read — confirm
	 * against PHY errata before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a new negotiation round. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8107
8108 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8109 {
8110         struct tg3 *tp = netdev_priv(dev);
8111
8112         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8113         ering->rx_mini_max_pending = 0;
8114         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8115                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8116         else
8117                 ering->rx_jumbo_max_pending = 0;
8118
8119         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8120
8121         ering->rx_pending = tp->rx_pending;
8122         ering->rx_mini_pending = 0;
8123         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8124                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8125         else
8126                 ering->rx_jumbo_pending = 0;
8127
8128         ering->tx_pending = tp->tx_pending;
8129 }
8130
/* ethtool ->set_ringparam handler: resize the RX/TX rings.  When the
 * interface is running the chip is halted and fully reinitialized so the
 * new sizes take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject sizes beyond the hardware rings, and TX rings too small to
	 * hold one maximally-fragmented packet (3x margin on HW_TSO_1_BUG
	 * parts).
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the device before touching ring configuration. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Full halt + reinit applies the new ring sizes. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8170
8171 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8172 {
8173         struct tg3 *tp = netdev_priv(dev);
8174
8175         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8176         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8177         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8178 }
8179
8180 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8181 {
8182         struct tg3 *tp = netdev_priv(dev);
8183         int irq_sync = 0, err = 0;
8184
8185         if (netif_running(dev)) {
8186                 tg3_netif_stop(tp);
8187                 irq_sync = 1;
8188         }
8189
8190         tg3_full_lock(tp, irq_sync);
8191
8192         if (epause->autoneg)
8193                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8194         else
8195                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8196         if (epause->rx_pause)
8197                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8198         else
8199                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8200         if (epause->tx_pause)
8201                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8202         else
8203                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8204
8205         if (netif_running(dev)) {
8206                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8207                 err = tg3_restart_hw(tp, 1);
8208                 if (!err)
8209                         tg3_netif_start(tp);
8210         }
8211
8212         tg3_full_unlock(tp);
8213
8214         return err;
8215 }
8216
8217 static u32 tg3_get_rx_csum(struct net_device *dev)
8218 {
8219         struct tg3 *tp = netdev_priv(dev);
8220         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8221 }
8222
8223 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8224 {
8225         struct tg3 *tp = netdev_priv(dev);
8226
8227         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8228                 if (data != 0)
8229                         return -EINVAL;
8230                 return 0;
8231         }
8232
8233         spin_lock_bh(&tp->lock);
8234         if (data)
8235                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8236         else
8237                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8238         spin_unlock_bh(&tp->lock);
8239
8240         return 0;
8241 }
8242
8243 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8244 {
8245         struct tg3 *tp = netdev_priv(dev);
8246
8247         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8248                 if (data != 0)
8249                         return -EINVAL;
8250                 return 0;
8251         }
8252
8253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8254             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8255                 ethtool_op_set_tx_hw_csum(dev, data);
8256         else
8257                 ethtool_op_set_tx_csum(dev, data);
8258
8259         return 0;
8260 }
8261
/* ethtool ->get_stats_count handler: number of entries in the fixed
 * statistics table reported by tg3_get_ethtool_stats().
 */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8266
/* ethtool ->self_test_count handler: number of self-tests this driver
 * implements.
 */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8271
8272 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8273 {
8274         switch (stringset) {
8275         case ETH_SS_STATS:
8276                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8277                 break;
8278         case ETH_SS_TEST:
8279                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8280                 break;
8281         default:
8282                 WARN_ON(1);     /* we need a WARN() */
8283                 break;
8284         }
8285 }
8286
8287 static int tg3_phys_id(struct net_device *dev, u32 data)
8288 {
8289         struct tg3 *tp = netdev_priv(dev);
8290         int i;
8291
8292         if (!netif_running(tp->dev))
8293                 return -EAGAIN;
8294
8295         if (data == 0)
8296                 data = 2;
8297
8298         for (i = 0; i < (data * 2); i++) {
8299                 if ((i % 2) == 0)
8300                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8301                                            LED_CTRL_1000MBPS_ON |
8302                                            LED_CTRL_100MBPS_ON |
8303                                            LED_CTRL_10MBPS_ON |
8304                                            LED_CTRL_TRAFFIC_OVERRIDE |
8305                                            LED_CTRL_TRAFFIC_BLINK |
8306                                            LED_CTRL_TRAFFIC_LED);
8307
8308                 else
8309                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8310                                            LED_CTRL_TRAFFIC_OVERRIDE);
8311
8312                 if (msleep_interruptible(500))
8313                         break;
8314         }
8315         tw32(MAC_LED_CTRL, tp->led_ctrl);
8316         return 0;
8317 }
8318
8319 static void tg3_get_ethtool_stats (struct net_device *dev,
8320                                    struct ethtool_stats *estats, u64 *tmp_stats)
8321 {
8322         struct tg3 *tp = netdev_priv(dev);
8323         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8324 }
8325
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* NVRAM self-test: identify the image format from its magic word, read the
 * image into a staging buffer, and validate its checksum (or per-byte
 * parity for the HW selfboot format).  Returns 0 on success, -EIO on read
 * failure or checksum/parity mismatch, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Choose the image size based on the magic number. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unrecognized selfboot variant: skip test */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image, word by word, in little-endian layout. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Simple 8-bit sum over the image must come to zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;
		int j, k;

		/* Separate the parity bits and the data bytes.  Bytes 0, 8
		 * and 16 of the image pack the parity bits for the
		 * surrounding data bytes; peel them off into parity[] and
		 * the payload into data[].
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have odd total
		 * weight: an odd-weight byte needs a clear parity bit, an
		 * even-weight byte needs it set.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8444
8445 #define TG3_SERDES_TIMEOUT_SEC  2
8446 #define TG3_COPPER_TIMEOUT_SEC  6
8447
8448 static int tg3_test_link(struct tg3 *tp)
8449 {
8450         int i, max;
8451
8452         if (!netif_running(tp->dev))
8453                 return -ENODEV;
8454
8455         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8456                 max = TG3_SERDES_TIMEOUT_SEC;
8457         else
8458                 max = TG3_COPPER_TIMEOUT_SEC;
8459
8460         for (i = 0; i < max; i++) {
8461                 if (netif_carrier_ok(tp->dev))
8462                         return 0;
8463
8464                 if (msleep_interruptible(1000))
8465                         break;
8466         }
8467
8468         return -EIO;
8469 }
8470
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	/* Table of registers to exercise: each entry gives the register
	 * offset, applicability flags, the mask of read-only bits, and the
	 * mask of read/write bits.  Terminated by offset 0xffff.
	 */
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register's original content. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
8691
8692 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8693 {
8694         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8695         int i;
8696         u32 j;
8697
8698         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8699                 for (j = 0; j < len; j += 4) {
8700                         u32 val;
8701
8702                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8703                         tg3_read_mem(tp, offset + j, &val);
8704                         if (val != test_pattern[i])
8705                                 return -EIO;
8706                 }
8707         }
8708         return 0;
8709 }
8710
/* Memory self-test: pick the internal-memory map matching this ASIC and
 * pattern-test each region via tg3_do_mem_test().  Returns 0 on success or
 * the first region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-chip-family tables of {offset, length} regions, each
	 * terminated by offset 0xffffffff.
	 */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the table for this ASIC generation. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8766
8767 #define TG3_MAC_LOOPBACK        0
8768 #define TG3_PHY_LOOPBACK        1
8769
/* Transmit one known frame while the MAC or PHY is placed in loopback
 * mode, then verify it is received back on the standard RX ring with
 * an intact payload.
 *
 * Returns 0 on success (or when the test is skipped for HW errata),
 * -EINVAL for an unknown loopback_mode, -ENOMEM if the test skb
 * cannot be allocated, and -EIO on timeout or data mismatch.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop frames back internally at the MAC. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* Tweak 5906 EPHY shadow registers 0x1b/0x10
			 * before loopback; shadow access is bracketed
			 * by MII_TG3_EPHY_SHADOW_EN.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				if (!tg3_readphy(tp, 0x10, &phy))
					tg3_writephy(tp, 0x10, phy & ~0x4000);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* 1514 = full ethernet frame (presumably sans FCS — the RX MTU
	 * register below is set to tx_len + 4 to cover it).
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address; rest of header zeroed. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	/* Fill the payload with a known ramp pattern, verified below. */
	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Snapshot the RX producer so we can spot our looped frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell, then read it back to flush the write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been consumed on TX and produced on RX. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: standard ring, no errors (an odd
	 * nibble indication alone is tolerated), and matching length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the ramp pattern survived the loopback. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8933
8934 #define TG3_MAC_LOOPBACK_FAILED         1
8935 #define TG3_PHY_LOOPBACK_FAILED         2
8936 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8937                                          TG3_PHY_LOOPBACK_FAILED)
8938
8939 static int tg3_test_loopback(struct tg3 *tp)
8940 {
8941         int err = 0;
8942
8943         if (!netif_running(tp->dev))
8944                 return TG3_LOOPBACK_FAILED;
8945
8946         err = tg3_reset_hw(tp, 1);
8947         if (err)
8948                 return TG3_LOOPBACK_FAILED;
8949
8950         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8951                 err |= TG3_MAC_LOOPBACK_FAILED;
8952         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8953                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8954                         err |= TG3_PHY_LOOPBACK_FAILED;
8955         }
8956
8957         return err;
8958 }
8959
/* ethtool self-test entry point.  Per-test results are written to
 * data[]: [0] nvram, [1] link, [2] registers, [3] memory,
 * [4] loopback bitmask, [5] interrupt.  etest->flags gets
 * ETH_TEST_FL_FAILED if anything fails.  Offline tests halt the chip
 * and restart it afterwards if the interface was running.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Bring the chip to full power (D0) for the duration of the test. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the device before the destructive tests. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its internal CPUs; hold the NVRAM
		 * lock across the CPU halts when we can get it.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test runs with the full lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Shut down, then restart the chip if it was running. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Restore low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9032
9033 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9034 {
9035         struct mii_ioctl_data *data = if_mii(ifr);
9036         struct tg3 *tp = netdev_priv(dev);
9037         int err;
9038
9039         switch(cmd) {
9040         case SIOCGMIIPHY:
9041                 data->phy_id = PHY_ADDR;
9042
9043                 /* fallthru */
9044         case SIOCGMIIREG: {
9045                 u32 mii_regval;
9046
9047                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9048                         break;                  /* We have no PHY */
9049
9050                 if (tp->link_config.phy_is_low_power)
9051                         return -EAGAIN;
9052
9053                 spin_lock_bh(&tp->lock);
9054                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9055                 spin_unlock_bh(&tp->lock);
9056
9057                 data->val_out = mii_regval;
9058
9059                 return err;
9060         }
9061
9062         case SIOCSMIIREG:
9063                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9064                         break;                  /* We have no PHY */
9065
9066                 if (!capable(CAP_NET_ADMIN))
9067                         return -EPERM;
9068
9069                 if (tp->link_config.phy_is_low_power)
9070                         return -EAGAIN;
9071
9072                 spin_lock_bh(&tp->lock);
9073                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9074                 spin_unlock_bh(&tp->lock);
9075
9076                 return err;
9077
9078         default:
9079                 /* do nothing */
9080                 break;
9081         }
9082         return -EOPNOTSUPP;
9083 }
9084
9085 #if TG3_VLAN_TAG_USED
9086 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9087 {
9088         struct tg3 *tp = netdev_priv(dev);
9089
9090         if (netif_running(dev))
9091                 tg3_netif_stop(tp);
9092
9093         tg3_full_lock(tp, 0);
9094
9095         tp->vlgrp = grp;
9096
9097         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9098         __tg3_set_rx_mode(dev);
9099
9100         tg3_full_unlock(tp);
9101
9102         if (netif_running(dev))
9103                 tg3_netif_start(tp);
9104 }
9105
9106 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
9107 {
9108         struct tg3 *tp = netdev_priv(dev);
9109
9110         if (netif_running(dev))
9111                 tg3_netif_stop(tp);
9112
9113         tg3_full_lock(tp, 0);
9114         if (tp->vlgrp)
9115                 tp->vlgrp->vlan_devices[vid] = NULL;
9116         tg3_full_unlock(tp);
9117
9118         if (netif_running(dev))
9119                 tg3_netif_start(tp);
9120 }
9121 #endif
9122
9123 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9124 {
9125         struct tg3 *tp = netdev_priv(dev);
9126
9127         memcpy(ec, &tp->coal, sizeof(*ec));
9128         return 0;
9129 }
9130
9131 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9132 {
9133         struct tg3 *tp = netdev_priv(dev);
9134         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9135         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9136
9137         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9138                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9139                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9140                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9141                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9142         }
9143
9144         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9145             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9146             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9147             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9148             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9149             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9150             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9151             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9152             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9153             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9154                 return -EINVAL;
9155
9156         /* No rx interrupts will be generated if both are zero */
9157         if ((ec->rx_coalesce_usecs == 0) &&
9158             (ec->rx_max_coalesced_frames == 0))
9159                 return -EINVAL;
9160
9161         /* No tx interrupts will be generated if both are zero */
9162         if ((ec->tx_coalesce_usecs == 0) &&
9163             (ec->tx_max_coalesced_frames == 0))
9164                 return -EINVAL;
9165
9166         /* Only copy relevant parameters, ignore all others. */
9167         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9168         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9169         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9170         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9171         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9172         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9173         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9174         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9175         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9176
9177         if (netif_running(dev)) {
9178                 tg3_full_lock(tp, 0);
9179                 __tg3_set_coalesce(tp, &tp->coal);
9180                 tg3_full_unlock(tp);
9181         }
9182         return 0;
9183 }
9184
/* ethtool operations table, attached to the netdev at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	/* TSO hooks are compiled in only when NETIF_F_TSO exists. */
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
9224
9225 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9226 {
9227         u32 cursize, val, magic;
9228
9229         tp->nvram_size = EEPROM_CHIP_SIZE;
9230
9231         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9232                 return;
9233
9234         if ((magic != TG3_EEPROM_MAGIC) &&
9235             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9236             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9237                 return;
9238
9239         /*
9240          * Size the chip by reading offsets at increasing powers of two.
9241          * When we encounter our validation signature, we know the addressing
9242          * has wrapped around, and thus have our chip size.
9243          */
9244         cursize = 0x10;
9245
9246         while (cursize < tp->nvram_size) {
9247                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9248                         return;
9249
9250                 if (val == magic)
9251                         break;
9252
9253                 cursize <<= 1;
9254         }
9255
9256         tp->nvram_size = cursize;
9257 }
9258
9259 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9260 {
9261         u32 val;
9262
9263         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9264                 return;
9265
9266         /* Selfboot format */
9267         if (val != TG3_EEPROM_MAGIC) {
9268                 tg3_get_eeprom_size(tp);
9269                 return;
9270         }
9271
9272         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9273                 if (val != 0) {
9274                         tp->nvram_size = (val >> 16) * 1024;
9275                         return;
9276                 }
9277         }
9278         tp->nvram_size = 0x20000;
9279 }
9280
/* Decode NVRAM_CFG1 on pre-5752 chips to record the NVRAM device's
 * vendor (JEDEC id), page size, and buffered/flash attributes in
 * tp->nvram_* and tp->tg3_flags*.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear the compatibility-bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* Only 5750 and 5780-class chips encode the vendor here. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Older chips: assume a buffered Atmel AT45DB0X1B part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9333
/* Decode NVRAM_CFG1 for 5752-class chips: record vendor, buffered
 * attribute, and page size; fall back to EEPROM settings when no
 * flash part is detected.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash part: page size is encoded in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9394
/* Decode NVRAM_CFG1 for 5755-class chips: record vendor, buffered
 * attribute, and page size of the fitted NVRAM device.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			/* EEPROM part: page size is the whole chip, and
			 * compatibility bypass must be cleared.
			 */
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9435
/* Decode NVRAM_CFG1 for 5787-class chips: record vendor, buffered
 * attribute, and page size of the fitted NVRAM device.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			/* EEPROM part: page size is the whole chip, and
			 * compatibility bypass must be cleared.
			 */
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9473
9474 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9475 {
9476         tp->nvram_jedecnum = JEDEC_ATMEL;
9477         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9478         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9479 }
9480
9481 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9482 static void __devinit tg3_nvram_init(struct tg3 *tp)
9483 {
9484         tw32_f(GRC_EEPROM_ADDR,
9485              (EEPROM_ADDR_FSM_RESET |
9486               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9487                EEPROM_ADDR_CLKPERD_SHIFT)));
9488
9489         msleep(1);
9490
9491         /* Enable seeprom accesses. */
9492         tw32_f(GRC_LOCAL_CTRL,
9493              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9494         udelay(100);
9495
9496         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9497             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9498                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9499
9500                 if (tg3_nvram_lock(tp)) {
9501                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9502                                "tg3_nvram_init failed.\n", tp->dev->name);
9503                         return;
9504                 }
9505                 tg3_enable_nvram_access(tp);
9506
9507                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9508                         tg3_get_5752_nvram_info(tp);
9509                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9510                         tg3_get_5755_nvram_info(tp);
9511                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9512                         tg3_get_5787_nvram_info(tp);
9513                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9514                         tg3_get_5906_nvram_info(tp);
9515                 else
9516                         tg3_get_nvram_info(tp);
9517
9518                 tg3_get_nvram_size(tp);
9519
9520                 tg3_disable_nvram_access(tp);
9521                 tg3_nvram_unlock(tp);
9522
9523         } else {
9524                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9525
9526                 tg3_get_eeprom_size(tp);
9527         }
9528 }
9529
/* Read one 32-bit word from the legacy serial EEPROM at byte @offset
 * (must be dword aligned) via the GRC EEPROM state machine.
 * Returns 0 and stores the word in *val, -EINVAL on a bad offset,
 * or -EBUSY if the controller never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve reserved bits; clear out the address, device-id and
	 * read fields before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9563
9564 #define NVRAM_CMD_TIMEOUT 10000
9565
9566 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9567 {
9568         int i;
9569
9570         tw32(NVRAM_CMD, nvram_cmd);
9571         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9572                 udelay(10);
9573                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9574                         udelay(10);
9575                         break;
9576                 }
9577         }
9578         if (i == NVRAM_CMD_TIMEOUT) {
9579                 return -EBUSY;
9580         }
9581         return 0;
9582 }
9583
9584 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9585 {
9586         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9587             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9588             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9589             (tp->nvram_jedecnum == JEDEC_ATMEL))
9590
9591                 addr = ((addr / tp->nvram_pagesize) <<
9592                         ATMEL_AT45DB0X1B_PAGE_POS) +
9593                        (addr % tp->nvram_pagesize);
9594
9595         return addr;
9596 }
9597
9598 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9599 {
9600         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9601             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9602             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9603             (tp->nvram_jedecnum == JEDEC_ATMEL))
9604
9605                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9606                         tp->nvram_pagesize) +
9607                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9608
9609         return addr;
9610 }
9611
/* Read one 32-bit word of NVRAM at byte @offset into *val.
 * Falls back to the legacy serial EEPROM path when no NVRAM interface
 * is present.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate the linear offset into the device's (possibly
	 * paged) physical address space.
	 */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* The read data register is byte-swapped before returning. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9643
9644 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9645 {
9646         int err;
9647         u32 tmp;
9648
9649         err = tg3_nvram_read(tp, offset, &tmp);
9650         *val = swab32(tmp);
9651         return err;
9652 }
9653
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * byte @offset, one 32-bit word per GRC EEPROM transaction.  Caller
 * guarantees dword alignment.  Returns 0 on success, -EBUSY if a
 * transaction never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* The data register takes little-endian layout. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* NOTE(review): writing COMPLETE back presumably acks
		 * the previous transaction's completion bit — confirm
		 * against the register spec.
		 */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Program address and kick off the write. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9695
9696 /* offset and length are dword aligned */
9697 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9698                 u8 *buf)
9699 {
9700         int ret = 0;
9701         u32 pagesize = tp->nvram_pagesize;
9702         u32 pagemask = pagesize - 1;
9703         u32 nvram_cmd;
9704         u8 *tmp;
9705
9706         tmp = kmalloc(pagesize, GFP_KERNEL);
9707         if (tmp == NULL)
9708                 return -ENOMEM;
9709
9710         while (len) {
9711                 int j;
9712                 u32 phy_addr, page_off, size;
9713
9714                 phy_addr = offset & ~pagemask;
9715
9716                 for (j = 0; j < pagesize; j += 4) {
9717                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9718                                                 (u32 *) (tmp + j))))
9719                                 break;
9720                 }
9721                 if (ret)
9722                         break;
9723
9724                 page_off = offset & pagemask;
9725                 size = pagesize;
9726                 if (len < size)
9727                         size = len;
9728
9729                 len -= size;
9730
9731                 memcpy(tmp + page_off, buf, size);
9732
9733                 offset = offset + (pagesize - page_off);
9734
9735                 tg3_enable_nvram_access(tp);
9736
9737                 /*
9738                  * Before we can erase the flash page, we need
9739                  * to issue a special "write enable" command.
9740                  */
9741                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9742
9743                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9744                         break;
9745
9746                 /* Erase the target page */
9747                 tw32(NVRAM_ADDR, phy_addr);
9748
9749                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9750                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9751
9752                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9753                         break;
9754
9755                 /* Issue another write enable to start the write. */
9756                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9757
9758                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9759                         break;
9760
9761                 for (j = 0; j < pagesize; j += 4) {
9762                         u32 data;
9763
9764                         data = *((u32 *) (tmp + j));
9765                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9766
9767                         tw32(NVRAM_ADDR, phy_addr + j);
9768
9769                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9770                                 NVRAM_CMD_WR;
9771
9772                         if (j == 0)
9773                                 nvram_cmd |= NVRAM_CMD_FIRST;
9774                         else if (j == (pagesize - 4))
9775                                 nvram_cmd |= NVRAM_CMD_LAST;
9776
9777                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9778                                 break;
9779                 }
9780                 if (ret)
9781                         break;
9782         }
9783
9784         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9785         tg3_nvram_exec_cmd(tp, nvram_cmd);
9786
9787         kfree(tmp);
9788
9789         return ret;
9790 }
9791
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash / EEPROM one dword at
 * a time, tagging each command with FIRST/LAST at page and buffer
 * boundaries.  Returns 0 on success or a negative errno from
 * tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Map the linear offset into the device's (possibly
		 * paged) physical address space.
		 */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page boundary (or the very first word);
		 * LAST at the end of a page or the end of the buffer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts (except on 5752/5755/5787) need an explicit
		 * write-enable before each FIRST-tagged burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9842
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily lifts the hardware
 * write protect (GPIO_OUTPUT1), dispatches to the EEPROM, buffered or
 * unbuffered write path, then restores protection.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Deassert GPIO_OUTPUT1 to allow writes while protected. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			/* NOTE(review): this early return skips restoring
			 * grc_local_ctrl below when write protect was
			 * active — verify intended.
			 */
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes for the duration of the update. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the write-protect GPIO state. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9897
/* Maps a PCI subsystem vendor/device pair to the PHY fitted on that
 * board, for use when the PHY cannot be identified from hardware or
 * EEPROM (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* PHY_ID_* constant; 0 selects serdes handling */
};
9902
/* Static board-to-PHY table, searched by lookup_by_subsys() as a last
 * resort during PHY probing.  A phy_id of 0 marks serdes boards.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9940
9941 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9942 {
9943         int i;
9944
9945         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9946                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9947                      tp->pdev->subsystem_vendor) &&
9948                     (subsys_id_to_phy_id[i].subsys_devid ==
9949                      tp->pdev->subsystem_device))
9950                         return &subsys_id_to_phy_id[i];
9951         }
9952         return NULL;
9953 }
9954
/* Parse the configuration the bootcode left in NIC SRAM (PHY id, LED
 * mode, ASF/WOL/write-protect flags) into @tp.  Forces the device to
 * D0 and enables the memory arbiter first so SRAM is readable.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	/* 5906 is detected via the PCIE transaction config LOM bit and
	 * skips the SRAM config parsing entirely.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		return;
	}

	/* Only trust the SRAM config area if the magic signature is
	 * present (i.e. bootcode has run).
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read on chips/bootcode versions that
		 * provide it.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM id fields into the driver's
			 * internal PHY id layout (same packing as
			 * tg3_phy_probe()).
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Certain Arima boards are exceptions despite
			 * the WP bit being set.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
10121
/* Identify the PHY: prefer the live MII ID registers, fall back to the
 * id read from EEPROM in tg3_get_eeprom_hw_cfg(), then to the static
 * subsystem-id table.  For copper PHYs (no serdes, no ASF), also make
 * sure full autonegotiation advertisement is programmed.  Returns 0 or
 * a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY id layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id of 0 in the table means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* Read BMSR twice; link status is a latched bit, so only
		 * the second read reflects the current state.  Skip the
		 * reset if link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* If the PHY is not already advertising everything we
		 * want, reprogram it and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	/* BCM5401 needs its DSP coefficients loaded; retried once more
	 * below if the PHY was identified via the fallback paths.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10247
/* Extract the board part number into tp->board_part_number.
 *
 * The 256-byte VPD image is fetched either directly from NVRAM
 * (when the EEPROM magic matches) or through the PCI VPD capability
 * in config space, then scanned for the PCI VPD "PN" keyword.  On
 * any failure a fixed fallback string is used instead.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD image lives at NVRAM offset 0x100; copy it one
		 * 32-bit word at a time, unpacking to bytes LSB-first.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No NVRAM image: read the VPD through the PCI VPD
		 * capability registers instead.
		 */
		int vpd_cap;

		/* NOTE(review): pci_find_capability() can return 0 if no
		 * VPD capability exists; that result is not checked here,
		 * so the config writes below would then hit offset
		 * 0 + PCI_VPD_ADDR — presumably all supported chips
		 * expose the capability, but confirm.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			/* Request a 4-byte VPD read at address i, then
			 * poll bit 15 of PCI_VPD_ADDR which the device
			 * sets when the data register is valid (up to
			 * ~100ms total).
			 */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			/* Store as little-endian so the byte layout in
			 * vpd_data matches the NVRAM path above.
			 */
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip large-resource tags 0x82 (identifier string) and
		 * 0x91 (VPD-W); bytes i+1/i+2 hold the 16-bit payload
		 * length, little-endian.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than the VPD-R tag (0x90) means the
		 * data is malformed for our purposes.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries inside the VPD-R block:
		 * 2-byte keyword, 1-byte length, then the data.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				/* NOTE(review): no NUL terminator is
				 * appended here; if partno_len fills the
				 * destination exactly, later strcpy-style
				 * consumers would overrun — confirm
				 * board_part_number is pre-zeroed and
				 * sized > 24.
				 */
				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	/* Fall back to a fixed name; 5906 parts get a specific string. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10347
10348 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10349 {
10350         u32 val, offset, start;
10351
10352         if (tg3_nvram_read_swab(tp, 0, &val))
10353                 return;
10354
10355         if (val != TG3_EEPROM_MAGIC)
10356                 return;
10357
10358         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10359             tg3_nvram_read_swab(tp, 0x4, &start))
10360                 return;
10361
10362         offset = tg3_nvram_logical_addr(tp, offset);
10363         if (tg3_nvram_read_swab(tp, offset, &val))
10364                 return;
10365
10366         if ((val & 0xfc000000) == 0x0c000000) {
10367                 u32 ver_offset, addr;
10368                 int i;
10369
10370                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10371                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10372                         return;
10373
10374                 if (val != 0)
10375                         return;
10376
10377                 addr = offset + ver_offset - start;
10378                 for (i = 0; i < 16; i += 4) {
10379                         if (tg3_nvram_read(tp, addr + i, &val))
10380                                 return;
10381
10382                         val = cpu_to_le32(val);
10383                         memcpy(tp->fw_ver + i, &val, 4);
10384                 }
10385         }
10386 }
10387
10388 static int __devinit tg3_get_invariants(struct tg3 *tp)
10389 {
10390         static struct pci_device_id write_reorder_chipsets[] = {
10391                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10392                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10393                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10394                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10395                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10396                              PCI_DEVICE_ID_VIA_8385_0) },
10397                 { },
10398         };
10399         u32 misc_ctrl_reg;
10400         u32 cacheline_sz_reg;
10401         u32 pci_state_reg, grc_misc_cfg;
10402         u32 val;
10403         u16 pci_cmd;
10404         int err, pcie_cap;
10405
10406         /* Force memory write invalidate off.  If we leave it on,
10407          * then on 5700_BX chips we have to enable a workaround.
10408          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10409          * to match the cacheline size.  The Broadcom driver have this
10410          * workaround but turns MWI off all the times so never uses
10411          * it.  This seems to suggest that the workaround is insufficient.
10412          */
10413         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10414         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10415         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10416
10417         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10418          * has the register indirect write enable bit set before
10419          * we try to access any of the MMIO registers.  It is also
10420          * critical that the PCI-X hw workaround situation is decided
10421          * before that as well.
10422          */
10423         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10424                               &misc_ctrl_reg);
10425
10426         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10427                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10428
10429         /* Wrong chip ID in 5752 A0. This code can be removed later
10430          * as A0 is not in production.
10431          */
10432         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10433                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10434
10435         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10436          * we need to disable memory and use config. cycles
10437          * only to access all registers. The 5702/03 chips
10438          * can mistakenly decode the special cycles from the
10439          * ICH chipsets as memory write cycles, causing corruption
10440          * of register and memory space. Only certain ICH bridges
10441          * will drive special cycles with non-zero data during the
10442          * address phase which can fall within the 5703's address
10443          * range. This is not an ICH bug as the PCI spec allows
10444          * non-zero address during special cycles. However, only
10445          * these ICH bridges are known to drive non-zero addresses
10446          * during special cycles.
10447          *
10448          * Since special cycles do not cross PCI bridges, we only
10449          * enable this workaround if the 5703 is on the secondary
10450          * bus of these ICH bridges.
10451          */
10452         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10453             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10454                 static struct tg3_dev_id {
10455                         u32     vendor;
10456                         u32     device;
10457                         u32     rev;
10458                 } ich_chipsets[] = {
10459                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10460                           PCI_ANY_ID },
10461                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10462                           PCI_ANY_ID },
10463                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10464                           0xa },
10465                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10466                           PCI_ANY_ID },
10467                         { },
10468                 };
10469                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10470                 struct pci_dev *bridge = NULL;
10471
10472                 while (pci_id->vendor != 0) {
10473                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10474                                                 bridge);
10475                         if (!bridge) {
10476                                 pci_id++;
10477                                 continue;
10478                         }
10479                         if (pci_id->rev != PCI_ANY_ID) {
10480                                 u8 rev;
10481
10482                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10483                                                      &rev);
10484                                 if (rev > pci_id->rev)
10485                                         continue;
10486                         }
10487                         if (bridge->subordinate &&
10488                             (bridge->subordinate->number ==
10489                              tp->pdev->bus->number)) {
10490
10491                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10492                                 pci_dev_put(bridge);
10493                                 break;
10494                         }
10495                 }
10496         }
10497
10498         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10499          * DMA addresses > 40-bit. This bridge may have other additional
10500          * 57xx devices behind it in some 4-port NIC designs for example.
10501          * Any tg3 device found behind the bridge will also need the 40-bit
10502          * DMA workaround.
10503          */
10504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10505             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10506                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10507                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10508                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10509         }
10510         else {
10511                 struct pci_dev *bridge = NULL;
10512
10513                 do {
10514                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10515                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10516                                                 bridge);
10517                         if (bridge && bridge->subordinate &&
10518                             (bridge->subordinate->number <=
10519                              tp->pdev->bus->number) &&
10520                             (bridge->subordinate->subordinate >=
10521                              tp->pdev->bus->number)) {
10522                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10523                                 pci_dev_put(bridge);
10524                                 break;
10525                         }
10526                 } while (bridge);
10527         }
10528
10529         /* Initialize misc host control in PCI block. */
10530         tp->misc_host_ctrl |= (misc_ctrl_reg &
10531                                MISC_HOST_CTRL_CHIPREV);
10532         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10533                                tp->misc_host_ctrl);
10534
10535         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10536                               &cacheline_sz_reg);
10537
10538         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10539         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10540         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10541         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10542
10543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10547             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10548             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10549                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10550
10551         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10552             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10553                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10554
10555         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10556                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10557                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10558                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10559                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10560                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10561                 } else {
10562                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10563                                           TG3_FLG2_HW_TSO_1_BUG;
10564                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10565                                 ASIC_REV_5750 &&
10566                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10567                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10568                 }
10569         }
10570
10571         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10572             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10573             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10574             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10575             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10576             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10577                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10578
10579         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10580         if (pcie_cap != 0) {
10581                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10582                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10583                         u16 lnkctl;
10584
10585                         pci_read_config_word(tp->pdev,
10586                                              pcie_cap + PCI_EXP_LNKCTL,
10587                                              &lnkctl);
10588                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10589                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10590                 }
10591         }
10592
10593         /* If we have an AMD 762 or VIA K8T800 chipset, write
10594          * reordering to the mailbox registers done by the host
10595          * controller can cause major troubles.  We read back from
10596          * every mailbox register write to force the writes to be
10597          * posted to the chip in order.
10598          */
10599         if (pci_dev_present(write_reorder_chipsets) &&
10600             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10601                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10602
10603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10604             tp->pci_lat_timer < 64) {
10605                 tp->pci_lat_timer = 64;
10606
10607                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10608                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10609                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10610                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10611
10612                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10613                                        cacheline_sz_reg);
10614         }
10615
10616         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10617                               &pci_state_reg);
10618
10619         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10620                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10621
10622                 /* If this is a 5700 BX chipset, and we are in PCI-X
10623                  * mode, enable register write workaround.
10624                  *
10625                  * The workaround is to use indirect register accesses
10626                  * for all chip writes not to mailbox registers.
10627                  */
10628                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10629                         u32 pm_reg;
10630                         u16 pci_cmd;
10631
10632                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10633
10634                         /* The chip can have it's power management PCI config
10635                          * space registers clobbered due to this bug.
10636                          * So explicitly force the chip into D0 here.
10637                          */
10638                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10639                                               &pm_reg);
10640                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10641                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10642                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10643                                                pm_reg);
10644
10645                         /* Also, force SERR#/PERR# in PCI command. */
10646                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10647                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10648                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10649                 }
10650         }
10651
10652         /* 5700 BX chips need to have their TX producer index mailboxes
10653          * written twice to workaround a bug.
10654          */
10655         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10656                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10657
10658         /* Back to back register writes can cause problems on this chip,
10659          * the workaround is to read back all reg writes except those to
10660          * mailbox regs.  See tg3_write_indirect_reg32().
10661          *
10662          * PCI Express 5750_A0 rev chips need this workaround too.
10663          */
10664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10665             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10666              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10667                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10668
10669         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10670                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10671         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10672                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10673
10674         /* Chip-specific fixup from Broadcom driver */
10675         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10676             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10677                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10678                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10679         }
10680
10681         /* Default fast path register access methods */
10682         tp->read32 = tg3_read32;
10683         tp->write32 = tg3_write32;
10684         tp->read32_mbox = tg3_read32;
10685         tp->write32_mbox = tg3_write32;
10686         tp->write32_tx_mbox = tg3_write32;
10687         tp->write32_rx_mbox = tg3_write32;
10688
10689         /* Various workaround register access methods */
10690         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10691                 tp->write32 = tg3_write_indirect_reg32;
10692         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10693                 tp->write32 = tg3_write_flush_reg32;
10694
10695         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10696             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10697                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10698                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10699                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10700         }
10701
10702         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10703                 tp->read32 = tg3_read_indirect_reg32;
10704                 tp->write32 = tg3_write_indirect_reg32;
10705                 tp->read32_mbox = tg3_read_indirect_mbox;
10706                 tp->write32_mbox = tg3_write_indirect_mbox;
10707                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10708                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10709
10710                 iounmap(tp->regs);
10711                 tp->regs = NULL;
10712
10713                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10714                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10715                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10716         }
10717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10718                 tp->read32_mbox = tg3_read32_mbox_5906;
10719                 tp->write32_mbox = tg3_write32_mbox_5906;
10720                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10721                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10722         }
10723
10724         if (tp->write32 == tg3_write_indirect_reg32 ||
10725             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10726              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10727               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10728                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10729
10730         /* Get eeprom hw config before calling tg3_set_power_state().
10731          * In particular, the TG3_FLG2_IS_NIC flag must be
10732          * determined before calling tg3_set_power_state() so that
10733          * we know whether or not to switch out of Vaux power.
10734          * When the flag is set, it means that GPIO1 is used for eeprom
10735          * write protect and also implies that it is a LOM where GPIOs
10736          * are not used to switch power.
10737          */
10738         tg3_get_eeprom_hw_cfg(tp);
10739
10740         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10741          * GPIO1 driven high will bring 5700's external PHY out of reset.
10742          * It is also used as eeprom write protect on LOMs.
10743          */
10744         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10745         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10746             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10747                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10748                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10749         /* Unused GPIO3 must be driven as output on 5752 because there
10750          * are no pull-up resistors on unused GPIO pins.
10751          */
10752         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10753                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10754
10755         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10756                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10757
10758         /* Force the chip into D0. */
10759         err = tg3_set_power_state(tp, PCI_D0);
10760         if (err) {
10761                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10762                        pci_name(tp->pdev));
10763                 return err;
10764         }
10765
10766         /* 5700 B0 chips do not support checksumming correctly due
10767          * to hardware bugs.
10768          */
10769         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10770                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10771
10772         /* Derive initial jumbo mode from MTU assigned in
10773          * ether_setup() via the alloc_etherdev() call
10774          */
10775         if (tp->dev->mtu > ETH_DATA_LEN &&
10776             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10777                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10778
10779         /* Determine WakeOnLan speed to use. */
10780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10781             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10782             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10783             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10784                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10785         } else {
10786                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10787         }
10788
10789         /* A few boards don't want Ethernet@WireSpeed phy feature */
10790         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10791             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10792              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10793              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10794             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10795             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10796                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10797
10798         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10799             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10800                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10801         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10802                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10803
10804         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10805                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10806                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10807                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10808                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10809                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10810         }
10811
10812         tp->coalesce_mode = 0;
10813         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10814             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10815                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10816
10817         /* Initialize MAC MI mode, polling disabled. */
10818         tw32_f(MAC_MI_MODE, tp->mi_mode);
10819         udelay(80);
10820
10821         /* Initialize data/descriptor byte/word swapping. */
10822         val = tr32(GRC_MODE);
10823         val &= GRC_MODE_HOST_STACKUP;
10824         tw32(GRC_MODE, val | tp->grc_mode);
10825
10826         tg3_switch_clocks(tp);
10827
10828         /* Clear this out for sanity. */
10829         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10830
10831         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10832                               &pci_state_reg);
10833         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10834             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10835                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10836
10837                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10838                     chiprevid == CHIPREV_ID_5701_B0 ||
10839                     chiprevid == CHIPREV_ID_5701_B2 ||
10840                     chiprevid == CHIPREV_ID_5701_B5) {
10841                         void __iomem *sram_base;
10842
10843                         /* Write some dummy words into the SRAM status block
10844                          * area, see if it reads back correctly.  If the return
10845                          * value is bad, force enable the PCIX workaround.
10846                          */
10847                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10848
10849                         writel(0x00000000, sram_base);
10850                         writel(0x00000000, sram_base + 4);
10851                         writel(0xffffffff, sram_base + 4);
10852                         if (readl(sram_base) != 0x00000000)
10853                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10854                 }
10855         }
10856
10857         udelay(50);
10858         tg3_nvram_init(tp);
10859
10860         grc_misc_cfg = tr32(GRC_MISC_CFG);
10861         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10862
10863         /* Broadcom's driver says that CIOBE multisplit has a bug */
10864 #if 0
10865         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10866             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10867                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10868                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10869         }
10870 #endif
10871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10872             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10873              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10874                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10875
10876         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10877             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10878                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10879         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10880                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10881                                       HOSTCC_MODE_CLRTICK_TXBD);
10882
10883                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10884                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10885                                        tp->misc_host_ctrl);
10886         }
10887
10888         /* these are limited to 10/100 only */
10889         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10890              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10891             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10892              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10893              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10894               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10895               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10896             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10897              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10898               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
10899               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
10900             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10901                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10902
10903         err = tg3_phy_probe(tp);
10904         if (err) {
10905                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10906                        pci_name(tp->pdev), err);
10907                 /* ... but do not return immediately ... */
10908         }
10909
10910         tg3_read_partno(tp);
10911         tg3_read_fw_ver(tp);
10912
10913         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10914                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10915         } else {
10916                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10917                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10918                 else
10919                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10920         }
10921
10922         /* 5700 {AX,BX} chips have a broken status block link
10923          * change bit implementation, so we must use the
10924          * status register in those cases.
10925          */
10926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10927                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10928         else
10929                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10930
10931         /* The led_ctrl is set during tg3_phy_probe, here we might
10932          * have to force the link status polling mechanism based
10933          * upon subsystem IDs.
10934          */
10935         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10936             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10937                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10938                                   TG3_FLAG_USE_LINKCHG_REG);
10939         }
10940
10941         /* For all SERDES we poll the MAC status register. */
10942         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10943                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10944         else
10945                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10946
10947         /* All chips before 5787 can get confused if TX buffers
10948          * straddle the 4GB address boundary in some cases.
10949          */
10950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10952             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10953                 tp->dev->hard_start_xmit = tg3_start_xmit;
10954         else
10955                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10956
10957         tp->rx_offset = 2;
10958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10959             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10960                 tp->rx_offset = 0;
10961
10962         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10963
10964         /* Increment the rx prod index on the rx std ring by at most
10965          * 8 for these chips to workaround hw errata.
10966          */
10967         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10969             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10970                 tp->rx_std_max_post = 8;
10971
10972         /* By default, disable wake-on-lan.  User can change this
10973          * using ETHTOOL_SWOL.
10974          */
10975         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10976
10977         return err;
10978 }
10979
10980 #ifdef CONFIG_SPARC64
10981 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10982 {
10983         struct net_device *dev = tp->dev;
10984         struct pci_dev *pdev = tp->pdev;
10985         struct pcidev_cookie *pcp = pdev->sysdata;
10986
10987         if (pcp != NULL) {
10988                 unsigned char *addr;
10989                 int len;
10990
10991                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10992                                         &len);
10993                 if (addr && len == 6) {
10994                         memcpy(dev->dev_addr, addr, 6);
10995                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10996                         return 0;
10997                 }
10998         }
10999         return -ENODEV;
11000 }
11001
11002 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11003 {
11004         struct net_device *dev = tp->dev;
11005
11006         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11007         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11008         return 0;
11009 }
11010 #endif
11011
/* Determine the device MAC address, trying in order: the OpenFirmware
 * device tree (sparc64 only), the SRAM mailbox left by bootcode, NVRAM,
 * and finally the MAC address registers themselves.  On success the
 * address is stored in dev->dev_addr/dev->perm_addr and 0 is returned;
 * -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC64
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        /* NVRAM byte offset of the MAC address; the default 0x7c is
         * overridden below for the second MAC of dual-MAC parts and
         * for the 5906.
         */
        mac_offset = 0x7c;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* NOTE(review): lock failure is answered with an NVRAM
                 * command reset, success with an immediate unlock —
                 * presumably to leave the NVRAM interface in a sane
                 * state before the reads below; confirm against
                 * tg3_nvram_lock()'s contract.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        }
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* 0x484b in the upper half is the signature marking a valid
         * mailbox entry.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
                        /* Note: byte order here differs from the
                         * mailbox layout above.
                         */
                        dev->dev_addr[0] = ((hi >> 16) & 0xff);
                        dev->dev_addr[1] = ((hi >> 24) & 0xff);
                        dev->dev_addr[2] = ((lo >>  0) & 0xff);
                        dev->dev_addr[3] = ((lo >>  8) & 0xff);
                        dev->dev_addr[4] = ((lo >> 16) & 0xff);
                        dev->dev_addr[5] = ((lo >> 24) & 0xff);
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}
11086
11087 #define BOUNDARY_SINGLE_CACHELINE       1
11088 #define BOUNDARY_MULTI_CACHELINE        2
11089
11090 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11091 {
11092         int cacheline_size;
11093         u8 byte;
11094         int goal;
11095
11096         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11097         if (byte == 0)
11098                 cacheline_size = 1024;
11099         else
11100                 cacheline_size = (int) byte * 4;
11101
11102         /* On 5703 and later chips, the boundary bits have no
11103          * effect.
11104          */
11105         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11106             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11107             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11108                 goto out;
11109
11110 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11111         goal = BOUNDARY_MULTI_CACHELINE;
11112 #else
11113 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11114         goal = BOUNDARY_SINGLE_CACHELINE;
11115 #else
11116         goal = 0;
11117 #endif
11118 #endif
11119
11120         if (!goal)
11121                 goto out;
11122
11123         /* PCI controllers on most RISC systems tend to disconnect
11124          * when a device tries to burst across a cache-line boundary.
11125          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11126          *
11127          * Unfortunately, for PCI-E there are only limited
11128          * write-side controls for this, and thus for reads
11129          * we will still get the disconnects.  We'll also waste
11130          * these PCI cycles for both read and write for chips
11131          * other than 5700 and 5701 which do not implement the
11132          * boundary bits.
11133          */
11134         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11135             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11136                 switch (cacheline_size) {
11137                 case 16:
11138                 case 32:
11139                 case 64:
11140                 case 128:
11141                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11142                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11143                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11144                         } else {
11145                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11146                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11147                         }
11148                         break;
11149
11150                 case 256:
11151                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11152                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11153                         break;
11154
11155                 default:
11156                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11157                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11158                         break;
11159                 };
11160         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11161                 switch (cacheline_size) {
11162                 case 16:
11163                 case 32:
11164                 case 64:
11165                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11166                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11167                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11168                                 break;
11169                         }
11170                         /* fallthrough */
11171                 case 128:
11172                 default:
11173                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11174                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11175                         break;
11176                 };
11177         } else {
11178                 switch (cacheline_size) {
11179                 case 16:
11180                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11181                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11182                                         DMA_RWCTRL_WRITE_BNDRY_16);
11183                                 break;
11184                         }
11185                         /* fallthrough */
11186                 case 32:
11187                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11188                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11189                                         DMA_RWCTRL_WRITE_BNDRY_32);
11190                                 break;
11191                         }
11192                         /* fallthrough */
11193                 case 64:
11194                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11195                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11196                                         DMA_RWCTRL_WRITE_BNDRY_64);
11197                                 break;
11198                         }
11199                         /* fallthrough */
11200                 case 128:
11201                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11202                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11203                                         DMA_RWCTRL_WRITE_BNDRY_128);
11204                                 break;
11205                         }
11206                         /* fallthrough */
11207                 case 256:
11208                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11209                                 DMA_RWCTRL_WRITE_BNDRY_256);
11210                         break;
11211                 case 512:
11212                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11213                                 DMA_RWCTRL_WRITE_BNDRY_512);
11214                         break;
11215                 case 1024:
11216                 default:
11217                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11218                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11219                         break;
11220                 };
11221         }
11222
11223 out:
11224         return val;
11225 }
11226
/* Run one test DMA transfer of 'size' bytes between the host buffer at
 * buf/buf_dma and NIC-internal buffer memory, in the direction given by
 * 'to_device', by hand-feeding an internal DMA descriptor into the
 * chip's FTQ machinery and polling the completion FIFO.  Returns 0 on
 * completion, -ENODEV if the transfer does not complete within ~4ms
 * (40 polls x 100us).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the DMA completion FIFOs and engines before the test. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal descriptor: host address, a fixed
         * NIC-side mbuf location (0x2100), and the transfer length.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM word by word through the
         * PCI config-space memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueueing the descriptor address. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the matching completion FIFO for our descriptor. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
11307
11308 #define TEST_BUFFER_SIZE        0x2000
11309
/* Choose the final TG3PCI_DMA_RW_CTRL value for this chip/bus and, on
 * 5700/5701 only, run an actual DMA write/read loop against a scratch
 * buffer to detect the write-DMA corruption bug, tightening the write
 * boundary to 16 bytes if corruption is seen.  Returns 0 on success or
 * a negative errno (-ENOMEM, -ENODEV).
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Base value: PCI write/read command codes, then the
         * arch-dependent boundary bits.
         */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* Per-bus/per-chip watermark and workaround bits.  The magic
         * constants below are chip-specific values; presumably from
         * Broadcom documentation — do not alter without hardware to
         * verify on.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |= 0x009f0000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        /* Clear the low nibble on 5703/5704. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* The live DMA test below is only needed for 5700/5701. */
        ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Loop: write the pattern to the chip, read it back, verify.
         * On corruption, retry once with a 16-byte write boundary;
         * if that also corrupts, give up with -ENODEV.
         */
        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
11490
11491 static void __devinit tg3_init_link_config(struct tg3 *tp)
11492 {
11493         tp->link_config.advertising =
11494                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11495                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11496                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11497                  ADVERTISED_Autoneg | ADVERTISED_MII);
11498         tp->link_config.speed = SPEED_INVALID;
11499         tp->link_config.duplex = DUPLEX_INVALID;
11500         tp->link_config.autoneg = AUTONEG_ENABLE;
11501         tp->link_config.active_speed = SPEED_INVALID;
11502         tp->link_config.active_duplex = DUPLEX_INVALID;
11503         tp->link_config.phy_is_low_power = 0;
11504         tp->link_config.orig_speed = SPEED_INVALID;
11505         tp->link_config.orig_duplex = DUPLEX_INVALID;
11506         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11507 }
11508
11509 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11510 {
11511         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11512                 tp->bufmgr_config.mbuf_read_dma_low_water =
11513                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11514                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11515                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11516                 tp->bufmgr_config.mbuf_high_water =
11517                         DEFAULT_MB_HIGH_WATER_5705;
11518                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11519                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11520                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11521                         tp->bufmgr_config.mbuf_high_water =
11522                                 DEFAULT_MB_HIGH_WATER_5906;
11523                 }
11524
11525                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11526                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11527                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11528                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11529                 tp->bufmgr_config.mbuf_high_water_jumbo =
11530                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11531         } else {
11532                 tp->bufmgr_config.mbuf_read_dma_low_water =
11533                         DEFAULT_MB_RDMA_LOW_WATER;
11534                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11535                         DEFAULT_MB_MACRX_LOW_WATER;
11536                 tp->bufmgr_config.mbuf_high_water =
11537                         DEFAULT_MB_HIGH_WATER;
11538
11539                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11540                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11541                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11542                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11543                 tp->bufmgr_config.mbuf_high_water_jumbo =
11544                         DEFAULT_MB_HIGH_WATER_JUMBO;
11545         }
11546
11547         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11548         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11549 }
11550
11551 static char * __devinit tg3_phy_string(struct tg3 *tp)
11552 {
11553         switch (tp->phy_id & PHY_ID_MASK) {
11554         case PHY_ID_BCM5400:    return "5400";
11555         case PHY_ID_BCM5401:    return "5401";
11556         case PHY_ID_BCM5411:    return "5411";
11557         case PHY_ID_BCM5701:    return "5701";
11558         case PHY_ID_BCM5703:    return "5703";
11559         case PHY_ID_BCM5704:    return "5704";
11560         case PHY_ID_BCM5705:    return "5705";
11561         case PHY_ID_BCM5750:    return "5750";
11562         case PHY_ID_BCM5752:    return "5752";
11563         case PHY_ID_BCM5714:    return "5714";
11564         case PHY_ID_BCM5780:    return "5780";
11565         case PHY_ID_BCM5755:    return "5755";
11566         case PHY_ID_BCM5787:    return "5787";
11567         case PHY_ID_BCM5756:    return "5722/5756";
11568         case PHY_ID_BCM5906:    return "5906";
11569         case PHY_ID_BCM8002:    return "8002/serdes";
11570         case 0:                 return "serdes";
11571         default:                return "unknown";
11572         };
11573 }
11574
11575 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11576 {
11577         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11578                 strcpy(str, "PCI Express");
11579                 return str;
11580         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11581                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11582
11583                 strcpy(str, "PCIX:");
11584
11585                 if ((clock_ctrl == 7) ||
11586                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11587                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11588                         strcat(str, "133MHz");
11589                 else if (clock_ctrl == 0)
11590                         strcat(str, "33MHz");
11591                 else if (clock_ctrl == 2)
11592                         strcat(str, "50MHz");
11593                 else if (clock_ctrl == 4)
11594                         strcat(str, "66MHz");
11595                 else if (clock_ctrl == 6)
11596                         strcat(str, "100MHz");
11597         } else {
11598                 strcpy(str, "PCI:");
11599                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11600                         strcat(str, "66MHz");
11601                 else
11602                         strcat(str, "33MHz");
11603         }
11604         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11605                 strcat(str, ":32-bit");
11606         else
11607                 strcat(str, ":64-bit");
11608         return str;
11609 }
11610
/* Locate the sibling PCI function of a dual-port device (e.g. 5704) by
 * scanning all eight functions of our own slot.  Returns the peer's
 * pci_dev, or tp->pdev itself when no peer exists (single-port config).
 * The returned pointer carries no extra reference (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                /* Not the peer: drop the reference pci_get_slot took.
                 * pci_dev_put(NULL) is a no-op for empty functions.
                 */
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}
11638
11639 static void __devinit tg3_init_coal(struct tg3 *tp)
11640 {
11641         struct ethtool_coalesce *ec = &tp->coal;
11642
11643         memset(ec, 0, sizeof(*ec));
11644         ec->cmd = ETHTOOL_GCOALESCE;
11645         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11646         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11647         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11648         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11649         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11650         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11651         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11652         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11653         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11654
11655         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11656                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11657                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11658                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11659                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11660                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11661         }
11662
11663         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11664                 ec->rx_coalesce_usecs_irq = 0;
11665                 ec->tx_coalesce_usecs_irq = 0;
11666                 ec->stats_block_coalesce_usecs = 0;
11667         }
11668 }
11669
/* tg3_init_one - PCI probe routine for a Tigon3 device.
 *
 * Enables and maps the device, allocates and initializes the
 * net_device and driver-private struct tg3, negotiates DMA masks,
 * reads chip invariants and the MAC address, resets any firmware
 * pre-boot state, runs a DMA self test, and finally registers the
 * netdev.  Returns 0 on success or a negative errno; on failure all
 * acquired resources are released via the goto-unwind ladder at the
 * bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	/* Hardware VLAN tag insertion/extraction support. */
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the driver-private state with mode defaults;
	 * msg_enable comes from the tg3_debug module parameter when set.
	 */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	/* Deferred work used to recover the chip outside interrupt context. */
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the BAR 0 register window. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	/* Default ring sizes; rx_pending may be reduced below for
	 * chips with limited receive buffering.
	 */
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device callbacks. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Determine chip revision, flags and quirks from config space. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: hardware TSO chips are always capable;
	 * 5700/5701/5705_A0/5906 and ASF-enabled chips cannot do
	 * firmware TSO; everything else can.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705 A1 without TSO on a slow bus gets a reduced RX ring and
	 * the MAX_RXPEND_64 flag.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips need to know their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Report the board configuration: part number, revision, PHY,
	 * bus type, link type, MAC address and feature flags.
	 */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")));

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11998
11999 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12000 {
12001         struct net_device *dev = pci_get_drvdata(pdev);
12002
12003         if (dev) {
12004                 struct tg3 *tp = netdev_priv(dev);
12005
12006                 flush_scheduled_work();
12007                 unregister_netdev(dev);
12008                 if (tp->regs) {
12009                         iounmap(tp->regs);
12010                         tp->regs = NULL;
12011                 }
12012                 free_netdev(dev);
12013                 pci_release_regions(pdev);
12014                 pci_disable_device(pdev);
12015                 pci_set_drvdata(pdev, NULL);
12016         }
12017 }
12018
/* PCI suspend hook: quiesce the interface, halt the chip and move it
 * to the requested low-power state.  If the power transition fails,
 * the hardware is restarted and the interface reattached so the
 * device remains usable, and the error is returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Make sure the scheduled reset_task (see tg3_init_one) has
	 * finished before we stop the interface.
	 */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark initialization as torn down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the hardware back up
		 * and reattach so the device stays functional.  If even
		 * the restart fails, just unlock and report the original
		 * error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12064
12065 static int tg3_resume(struct pci_dev *pdev)
12066 {
12067         struct net_device *dev = pci_get_drvdata(pdev);
12068         struct tg3 *tp = netdev_priv(dev);
12069         int err;
12070
12071         if (!netif_running(dev))
12072                 return 0;
12073
12074         pci_restore_state(tp->pdev);
12075
12076         err = tg3_set_power_state(tp, PCI_D0);
12077         if (err)
12078                 return err;
12079
12080         netif_device_attach(dev);
12081
12082         tg3_full_lock(tp, 0);
12083
12084         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12085         err = tg3_restart_hw(tp, 1);
12086         if (err)
12087                 goto out;
12088
12089         tp->timer.expires = jiffies + tp->timer_offset;
12090         add_timer(&tp->timer);
12091
12092         tg3_netif_start(tp);
12093
12094 out:
12095         tg3_full_unlock(tp);
12096
12097         return err;
12098 }
12099
/* PCI driver glue: probe/remove plus suspend/resume power-management
 * hooks, matched against the tg3_pci_tbl device ID table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12108
/* Module entry point: register the PCI driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12113
/* Module exit point: unregister the PCI driver, triggering
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12118
/* Bind module load/unload to driver registration/unregistration. */
module_init(tg3_init);
module_exit(tg3_cleanup);