1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.72"
72 #define DRV_MODULE_RELDATE      "January 8, 2007"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
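/* A quick sanity check on the arithmetic: HZ is the number of jiffies
 * per second, so (5 * HZ) is a 5 second watchdog interval regardless
 * of the kernel's configured tick rate.
 */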
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
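/* For example, on a jumbo-capable chip the MTU can be raised at run time
 * with "ip link set dev ethX mtu 9000" (or the ifconfig equivalent);
 * non-jumbo chips are limited to the standard 1500 byte MTU.
 */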
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place these ring size constants into the tp struct itself;
107  * we really want to expose them to GCC so that modulo et al.
108  * operations are done with shifts and masks instead of with hw
109  * multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
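/* NEXT_TX() is the '& (foo - 1)' form mentioned above: since
 * TG3_TX_RING_SIZE is a power of two, masking with (size - 1) is
 * equivalent to '% TG3_TX_RING_SIZE' but needs no divide, e.g.
 * NEXT_TX(511) wraps the producer index back to 0.
 */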
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
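/* In other words, the TX queue is only restarted from the completion
 * path once roughly a quarter of the configured descriptors are free
 * again, so the queue does not bounce on every reclaimed descriptor.
 */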
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
139 static char version[] __devinitdata =
140         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION);
146
147 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug, int, 0);
149 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
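/* Example usage, assuming the standard NETIF_MSG_* bit values:
 * "modprobe tg3 tg3_debug=0x7" enables only the DRV, PROBE and LINK
 * messages, while the default of -1 selects TG3_DEF_MSG_ENABLE above.
 */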
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
205         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
211         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
212         {}
213 };
214
215 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
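/* MODULE_DEVICE_TABLE() exports the PCI ID list above to user space so
 * that hotplug/modprobe can autoload tg3 whenever a device matching one
 * of the tg3_pci_tbl entries (e.g. a BCM5751) is discovered.
 */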
216
217 static const struct {
218         const char string[ETH_GSTRING_LEN];
219 } ethtool_stats_keys[TG3_NUM_STATS] = {
220         { "rx_octets" },
221         { "rx_fragments" },
222         { "rx_ucast_packets" },
223         { "rx_mcast_packets" },
224         { "rx_bcast_packets" },
225         { "rx_fcs_errors" },
226         { "rx_align_errors" },
227         { "rx_xon_pause_rcvd" },
228         { "rx_xoff_pause_rcvd" },
229         { "rx_mac_ctrl_rcvd" },
230         { "rx_xoff_entered" },
231         { "rx_frame_too_long_errors" },
232         { "rx_jabbers" },
233         { "rx_undersize_packets" },
234         { "rx_in_length_errors" },
235         { "rx_out_length_errors" },
236         { "rx_64_or_less_octet_packets" },
237         { "rx_65_to_127_octet_packets" },
238         { "rx_128_to_255_octet_packets" },
239         { "rx_256_to_511_octet_packets" },
240         { "rx_512_to_1023_octet_packets" },
241         { "rx_1024_to_1522_octet_packets" },
242         { "rx_1523_to_2047_octet_packets" },
243         { "rx_2048_to_4095_octet_packets" },
244         { "rx_4096_to_8191_octet_packets" },
245         { "rx_8192_to_9022_octet_packets" },
246
247         { "tx_octets" },
248         { "tx_collisions" },
249
250         { "tx_xon_sent" },
251         { "tx_xoff_sent" },
252         { "tx_flow_control" },
253         { "tx_mac_errors" },
254         { "tx_single_collisions" },
255         { "tx_mult_collisions" },
256         { "tx_deferred" },
257         { "tx_excessive_collisions" },
258         { "tx_late_collisions" },
259         { "tx_collide_2times" },
260         { "tx_collide_3times" },
261         { "tx_collide_4times" },
262         { "tx_collide_5times" },
263         { "tx_collide_6times" },
264         { "tx_collide_7times" },
265         { "tx_collide_8times" },
266         { "tx_collide_9times" },
267         { "tx_collide_10times" },
268         { "tx_collide_11times" },
269         { "tx_collide_12times" },
270         { "tx_collide_13times" },
271         { "tx_collide_14times" },
272         { "tx_collide_15times" },
273         { "tx_ucast_packets" },
274         { "tx_mcast_packets" },
275         { "tx_bcast_packets" },
276         { "tx_carrier_sense_errors" },
277         { "tx_discards" },
278         { "tx_errors" },
279
280         { "dma_writeq_full" },
281         { "dma_write_prioq_full" },
282         { "rxbds_empty" },
283         { "rx_discards" },
284         { "rx_errors" },
285         { "rx_threshold_hit" },
286
287         { "dma_readq_full" },
288         { "dma_read_prioq_full" },
289         { "tx_comp_queue_full" },
290
291         { "ring_set_send_prod_index" },
292         { "ring_status_update" },
293         { "nic_irqs" },
294         { "nic_avoided_irqs" },
295         { "nic_tx_threshold_hit" }
296 };
297
298 static const struct {
299         const char string[ETH_GSTRING_LEN];
300 } ethtool_test_keys[TG3_NUM_TEST] = {
301         { "nvram test     (online) " },
302         { "link test      (online) " },
303         { "register test  (offline)" },
304         { "memory test    (offline)" },
305         { "loopback test  (offline)" },
306         { "interrupt test (offline)" },
307 };
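/* Both string tables above are handed back through the ETHTOOL_GSTRINGS
 * interface; in practice "ethtool -S ethX" lists the statistics keys and
 * "ethtool -t ethX" labels its results with the self-test names.
 */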
308
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310 {
311         writel(val, tp->regs + off);
312 }
313
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
315 {
316         return readl(tp->regs + off);
317 }
318
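/* Some configurations (e.g. chips with the PCI-X target hardware bug
 * worked around elsewhere in this driver) cannot safely use the
 * memory-mapped BAR, so registers are reached indirectly through a
 * window in PCI config space: the offset goes into TG3PCI_REG_BASE_ADDR
 * and the data is moved through TG3PCI_REG_DATA.  indirect_lock keeps
 * the two config cycles atomic with respect to other users of the
 * window.
 */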
319 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
320 {
321         unsigned long flags;
322
323         spin_lock_irqsave(&tp->indirect_lock, flags);
324         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
325         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
326         spin_unlock_irqrestore(&tp->indirect_lock, flags);
327 }
328
329 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
330 {
331         writel(val, tp->regs + off);
332         readl(tp->regs + off);
333 }
334
335 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
336 {
337         unsigned long flags;
338         u32 val;
339
340         spin_lock_irqsave(&tp->indirect_lock, flags);
341         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
342         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
343         spin_unlock_irqrestore(&tp->indirect_lock, flags);
344         return val;
345 }
346
347 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
348 {
349         unsigned long flags;
350
351         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
352                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
353                                        TG3_64BIT_REG_LOW, val);
354                 return;
355         }
356         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
357                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
358                                        TG3_64BIT_REG_LOW, val);
359                 return;
360         }
361
362         spin_lock_irqsave(&tp->indirect_lock, flags);
363         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
364         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
365         spin_unlock_irqrestore(&tp->indirect_lock, flags);
366
367         /* In indirect mode when disabling interrupts, we also need
368          * to clear the interrupt bit in the GRC local ctrl register.
369          */
370         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
371             (val == 0x1)) {
372                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
373                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
374         }
375 }
376
377 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
378 {
379         unsigned long flags;
380         u32 val;
381
382         spin_lock_irqsave(&tp->indirect_lock, flags);
383         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
384         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
385         spin_unlock_irqrestore(&tp->indirect_lock, flags);
386         return val;
387 }
388
389 /* usec_wait specifies the wait time in usec when writing to certain registers
390  * where it is unsafe to read back the register without some delay.
391  * GRC_LOCAL_CTRL is one example, e.g. when the GPIOs are toggled to switch power.
392  * TG3PCI_CLOCK_CTRL is another, e.g. when the clock frequencies are changed.
393  */
394 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
395 {
396         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
397             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
398                 /* Non-posted methods */
399                 tp->write32(tp, off, val);
400         else {
401                 /* Posted method */
402                 tg3_write32(tp, off, val);
403                 if (usec_wait)
404                         udelay(usec_wait);
405                 tp->read32(tp, off);
406         }
407         /* Wait again after the read for the posted method to guarantee that
408          * the wait time is met.
409          */
410         if (usec_wait)
411                 udelay(usec_wait);
412 }
413
414 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
415 {
416         tp->write32_mbox(tp, off, val);
417         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
418             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
419                 tp->read32_mbox(tp, off);
420 }
421
422 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
423 {
424         void __iomem *mbox = tp->regs + off;
425         writel(val, mbox);
426         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
427                 writel(val, mbox);
428         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
429                 readl(mbox);
430 }
431
432 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
433 {
434         return readl(tp->regs + off + GRCMBOX_BASE);
435 }
436
437 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
438 {
439         writel(val, tp->regs + off + GRCMBOX_BASE);
440 }
441
442 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
443 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
444 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
445 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
446 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
447
448 #define tw32(reg,val)           tp->write32(tp, reg, val)
449 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
450 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
451 #define tr32(reg)               tp->read32(tp, reg)
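/* All register and mailbox accesses funnel through the method pointers
 * in struct tg3 so probe-time code can select the flavour each chip
 * needs.  Roughly speaking:
 *
 *      tw32(MAC_MODE, val)   -> tp->write32(tp, MAC_MODE, val)
 *      tw32_f(MAC_MODE, val) -> same, but normally followed by a read
 *                               back to flush the posted PCI write
 */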
452
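/* tg3_write_mem()/tg3_read_mem() reach the NIC's on-board SRAM through
 * the memory window at TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA,
 * using PCI config cycles or the mapped registers depending on
 * TG3_FLAG_SRAM_USE_CONFIG.
 */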
453 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
454 {
455         unsigned long flags;
456
457         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
458             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
459                 return;
460
461         spin_lock_irqsave(&tp->indirect_lock, flags);
462         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
463                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
464                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
465
466                 /* Always leave this as zero. */
467                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
468         } else {
469                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
470                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
471
472                 /* Always leave this as zero. */
473                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
474         }
475         spin_unlock_irqrestore(&tp->indirect_lock, flags);
476 }
477
478 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
479 {
480         unsigned long flags;
481
482         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
483             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
484                 *val = 0;
485                 return;
486         }
487
488         spin_lock_irqsave(&tp->indirect_lock, flags);
489         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
490                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
491                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
492
493                 /* Always leave this as zero. */
494                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
495         } else {
496                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
497                 *val = tr32(TG3PCI_MEM_WIN_DATA);
498
499                 /* Always leave this as zero. */
500                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
501         }
502         spin_unlock_irqrestore(&tp->indirect_lock, flags);
503 }
504
505 static void tg3_disable_ints(struct tg3 *tp)
506 {
507         tw32(TG3PCI_MISC_HOST_CTRL,
508              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
509         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
510 }
511
512 static inline void tg3_cond_int(struct tg3 *tp)
513 {
514         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
515             (tp->hw_status->status & SD_STATUS_UPDATED))
516                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
517         else
518                 tw32(HOSTCC_MODE, tp->coalesce_mode |
519                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
520 }
521
522 static void tg3_enable_ints(struct tg3 *tp)
523 {
524         tp->irq_sync = 0;
525         wmb();
526
527         tw32(TG3PCI_MISC_HOST_CTRL,
528              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
529         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
530                        (tp->last_tag << 24));
531         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
532                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
533                                (tp->last_tag << 24));
534         tg3_cond_int(tp);
535 }
536
537 static inline unsigned int tg3_has_work(struct tg3 *tp)
538 {
539         struct tg3_hw_status *sblk = tp->hw_status;
540         unsigned int work_exists = 0;
541
542         /* check for phy events */
543         if (!(tp->tg3_flags &
544               (TG3_FLAG_USE_LINKCHG_REG |
545                TG3_FLAG_POLL_SERDES))) {
546                 if (sblk->status & SD_STATUS_LINK_CHG)
547                         work_exists = 1;
548         }
549         /* check for RX/TX work to do */
550         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
551             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
552                 work_exists = 1;
553
554         return work_exists;
555 }
556
557 /* tg3_restart_ints()
558  *  Similar to tg3_enable_ints(), but it accurately determines whether
559  *  there is new work pending and can return without flushing the PIO
560  *  write which re-enables interrupts.
561  */
562 static void tg3_restart_ints(struct tg3 *tp)
563 {
564         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
565                      tp->last_tag << 24);
566         mmiowb();
567
568         /* When doing tagged status, this work check is unnecessary.
569          * The last_tag we write above tells the chip which piece of
570          * work we've completed.
571          */
572         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
573             tg3_has_work(tp))
574                 tw32(HOSTCC_MODE, tp->coalesce_mode |
575                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
576 }
577
578 static inline void tg3_netif_stop(struct tg3 *tp)
579 {
580         tp->dev->trans_start = jiffies; /* prevent tx timeout */
581         netif_poll_disable(tp->dev);
582         netif_tx_disable(tp->dev);
583 }
584
585 static inline void tg3_netif_start(struct tg3 *tp)
586 {
587         netif_wake_queue(tp->dev);
588         /* NOTE: unconditional netif_wake_queue is only appropriate
589          * so long as all callers are assured to have free tx slots
590          * (such as after tg3_init_hw)
591          */
592         netif_poll_enable(tp->dev);
593         tp->hw_status->status |= SD_STATUS_UPDATED;
594         tg3_enable_ints(tp);
595 }
596
597 static void tg3_switch_clocks(struct tg3 *tp)
598 {
599         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
600         u32 orig_clock_ctrl;
601
602         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
603                 return;
604
605         orig_clock_ctrl = clock_ctrl;
606         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
607                        CLOCK_CTRL_CLKRUN_OENABLE |
608                        0x1f);
609         tp->pci_clock_ctrl = clock_ctrl;
610
611         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
612                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
613                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
614                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
615                 }
616         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
617                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
618                             clock_ctrl |
619                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
620                             40);
621                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
622                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
623                             40);
624         }
625         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
626 }
627
628 #define PHY_BUSY_LOOPS  5000
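/* With the 10 usec poll interval used below, this allows roughly
 * 5000 * 10us = 50ms for an MII transaction to finish before
 * tg3_readphy()/tg3_writephy() give up and return -EBUSY.
 */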
629
630 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
631 {
632         u32 frame_val;
633         unsigned int loops;
634         int ret;
635
636         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
637                 tw32_f(MAC_MI_MODE,
638                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
639                 udelay(80);
640         }
641
642         *val = 0x0;
643
644         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
645                       MI_COM_PHY_ADDR_MASK);
646         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
647                       MI_COM_REG_ADDR_MASK);
648         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
649
650         tw32_f(MAC_MI_COM, frame_val);
651
652         loops = PHY_BUSY_LOOPS;
653         while (loops != 0) {
654                 udelay(10);
655                 frame_val = tr32(MAC_MI_COM);
656
657                 if ((frame_val & MI_COM_BUSY) == 0) {
658                         udelay(5);
659                         frame_val = tr32(MAC_MI_COM);
660                         break;
661                 }
662                 loops -= 1;
663         }
664
665         ret = -EBUSY;
666         if (loops != 0) {
667                 *val = frame_val & MI_COM_DATA_MASK;
668                 ret = 0;
669         }
670
671         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
672                 tw32_f(MAC_MI_MODE, tp->mi_mode);
673                 udelay(80);
674         }
675
676         return ret;
677 }
678
679 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
680 {
681         u32 frame_val;
682         unsigned int loops;
683         int ret;
684
685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
686             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
687                 return 0;
688
689         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
690                 tw32_f(MAC_MI_MODE,
691                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
692                 udelay(80);
693         }
694
695         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
696                       MI_COM_PHY_ADDR_MASK);
697         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
698                       MI_COM_REG_ADDR_MASK);
699         frame_val |= (val & MI_COM_DATA_MASK);
700         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
701
702         tw32_f(MAC_MI_COM, frame_val);
703
704         loops = PHY_BUSY_LOOPS;
705         while (loops != 0) {
706                 udelay(10);
707                 frame_val = tr32(MAC_MI_COM);
708                 if ((frame_val & MI_COM_BUSY) == 0) {
709                         udelay(5);
710                         frame_val = tr32(MAC_MI_COM);
711                         break;
712                 }
713                 loops -= 1;
714         }
715
716         ret = -EBUSY;
717         if (loops != 0)
718                 ret = 0;
719
720         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
721                 tw32_f(MAC_MI_MODE, tp->mi_mode);
722                 udelay(80);
723         }
724
725         return ret;
726 }
727
728 static void tg3_phy_set_wirespeed(struct tg3 *tp)
729 {
730         u32 val;
731
732         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
733                 return;
734
735         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
736             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
737                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
738                              (val | (1 << 15) | (1 << 4)));
739 }
740
741 static int tg3_bmcr_reset(struct tg3 *tp)
742 {
743         u32 phy_control;
744         int limit, err;
745
746         /* OK, reset it, and poll the BMCR_RESET bit until it
747          * clears or we time out.
748          */
749         phy_control = BMCR_RESET;
750         err = tg3_writephy(tp, MII_BMCR, phy_control);
751         if (err != 0)
752                 return -EBUSY;
753
754         limit = 5000;
755         while (limit--) {
756                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
757                 if (err != 0)
758                         return -EBUSY;
759
760                 if ((phy_control & BMCR_RESET) == 0) {
761                         udelay(40);
762                         break;
763                 }
764                 udelay(10);
765         }
766         if (limit < 0)
767                 return -EBUSY;
768
769         return 0;
770 }
771
772 static int tg3_wait_macro_done(struct tg3 *tp)
773 {
774         int limit = 100;
775
776         while (limit--) {
777                 u32 tmp32;
778
779                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
780                         if ((tmp32 & 0x1000) == 0)
781                                 break;
782                 }
783         }
784         if (limit < 0)
785                 return -EBUSY;
786
787         return 0;
788 }
789
790 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
791 {
792         static const u32 test_pat[4][6] = {
793         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
794         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
795         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
796         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
797         };
798         int chan;
799
800         for (chan = 0; chan < 4; chan++) {
801                 int i;
802
803                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
804                              (chan * 0x2000) | 0x0200);
805                 tg3_writephy(tp, 0x16, 0x0002);
806
807                 for (i = 0; i < 6; i++)
808                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
809                                      test_pat[chan][i]);
810
811                 tg3_writephy(tp, 0x16, 0x0202);
812                 if (tg3_wait_macro_done(tp)) {
813                         *resetp = 1;
814                         return -EBUSY;
815                 }
816
817                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
818                              (chan * 0x2000) | 0x0200);
819                 tg3_writephy(tp, 0x16, 0x0082);
820                 if (tg3_wait_macro_done(tp)) {
821                         *resetp = 1;
822                         return -EBUSY;
823                 }
824
825                 tg3_writephy(tp, 0x16, 0x0802);
826                 if (tg3_wait_macro_done(tp)) {
827                         *resetp = 1;
828                         return -EBUSY;
829                 }
830
831                 for (i = 0; i < 6; i += 2) {
832                         u32 low, high;
833
834                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
835                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
836                             tg3_wait_macro_done(tp)) {
837                                 *resetp = 1;
838                                 return -EBUSY;
839                         }
840                         low &= 0x7fff;
841                         high &= 0x000f;
842                         if (low != test_pat[chan][i] ||
843                             high != test_pat[chan][i+1]) {
844                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
845                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
846                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
847
848                                 return -EBUSY;
849                         }
850                 }
851         }
852
853         return 0;
854 }
855
856 static int tg3_phy_reset_chanpat(struct tg3 *tp)
857 {
858         int chan;
859
860         for (chan = 0; chan < 4; chan++) {
861                 int i;
862
863                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
864                              (chan * 0x2000) | 0x0200);
865                 tg3_writephy(tp, 0x16, 0x0002);
866                 for (i = 0; i < 6; i++)
867                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
868                 tg3_writephy(tp, 0x16, 0x0202);
869                 if (tg3_wait_macro_done(tp))
870                         return -EBUSY;
871         }
872
873         return 0;
874 }
875
876 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
877 {
878         u32 reg32, phy9_orig;
879         int retries, do_phy_reset, err;
880
881         retries = 10;
882         do_phy_reset = 1;
883         do {
884                 if (do_phy_reset) {
885                         err = tg3_bmcr_reset(tp);
886                         if (err)
887                                 return err;
888                         do_phy_reset = 0;
889                 }
890
891                 /* Disable transmitter and interrupt.  */
892                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
893                         continue;
894
895                 reg32 |= 0x3000;
896                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
897
898                 /* Set full-duplex, 1000 mbps.  */
899                 tg3_writephy(tp, MII_BMCR,
900                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
901
902                 /* Set to master mode.  */
903                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
904                         continue;
905
906                 tg3_writephy(tp, MII_TG3_CTRL,
907                              (MII_TG3_CTRL_AS_MASTER |
908                               MII_TG3_CTRL_ENABLE_AS_MASTER));
909
910                 /* Enable SM_DSP_CLOCK and 6dB.  */
911                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
912
913                 /* Block the PHY control access.  */
914                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
915                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
916
917                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
918                 if (!err)
919                         break;
920         } while (--retries);
921
922         err = tg3_phy_reset_chanpat(tp);
923         if (err)
924                 return err;
925
926         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
927         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
928
929         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
930         tg3_writephy(tp, 0x16, 0x0000);
931
932         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
934                 /* Set Extended packet length bit for jumbo frames */
935                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
936         }
937         else {
938                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
939         }
940
941         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
942
943         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
944                 reg32 &= ~0x3000;
945                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
946         } else if (!err)
947                 err = -EBUSY;
948
949         return err;
950 }
951
952 static void tg3_link_report(struct tg3 *);
953
954 /* Reset the tigon3 PHY and apply the chip- and PHY-specific
955  * workarounds that must follow a PHY reset.
956  */
957 static int tg3_phy_reset(struct tg3 *tp)
958 {
959         u32 phy_status;
960         int err;
961
962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
963                 u32 val;
964
965                 val = tr32(GRC_MISC_CFG);
966                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
967                 udelay(40);
968         }
969         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
970         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
971         if (err != 0)
972                 return -EBUSY;
973
974         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
975                 netif_carrier_off(tp->dev);
976                 tg3_link_report(tp);
977         }
978
979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
980             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
981             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
982                 err = tg3_phy_reset_5703_4_5(tp);
983                 if (err)
984                         return err;
985                 goto out;
986         }
987
988         err = tg3_bmcr_reset(tp);
989         if (err)
990                 return err;
991
992 out:
993         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
994                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
995                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
996                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
997                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
998                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
999                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1000         }
1001         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1002                 tg3_writephy(tp, 0x1c, 0x8d68);
1003                 tg3_writephy(tp, 0x1c, 0x8d68);
1004         }
1005         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1006                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1007                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1008                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1009                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1010                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1011                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1012                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1013                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1014         }
1015         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1016                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1017                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1019                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1020                         tg3_writephy(tp, MII_TG3_TEST1,
1021                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1022                 } else
1023                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1024                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025         }
1026         /* Set Extended packet length bit (bit 14) on all chips that
1027          * support jumbo frames. */
1028         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1029                 /* Cannot do read-modify-write on 5401 */
1030                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1031         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1032                 u32 phy_reg;
1033
1034                 /* Set bit 14 with read-modify-write to preserve other bits */
1035                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1036                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1037                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1038         }
1039
1040         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1041  * jumbo frame transmission.
1042          */
1043         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1044                 u32 phy_reg;
1045
1046                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1047                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1048                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1049         }
1050
1051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1052                 u32 phy_reg;
1053
1054                 /* adjust output voltage */
1055                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1056
1057                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
1058                         u32 phy_reg2;
1059
1060                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1061                                      phy_reg | MII_TG3_EPHY_SHADOW_EN);
1062                         /* Enable auto-MDIX */
1063                         if (!tg3_readphy(tp, 0x10, &phy_reg2))
1064                                 tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
1065                         tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
1066                 }
1067         }
1068
1069         tg3_phy_set_wirespeed(tp);
1070         return 0;
1071 }
1072
1073 static void tg3_frob_aux_power(struct tg3 *tp)
1074 {
1075         struct tg3 *tp_peer = tp;
1076
1077         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1078                 return;
1079
1080         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1081             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1082                 struct net_device *dev_peer;
1083
1084                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1085                 /* remove_one() may have been run on the peer. */
1086                 if (!dev_peer)
1087                         tp_peer = tp;
1088                 else
1089                         tp_peer = netdev_priv(dev_peer);
1090         }
1091
1092         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1093             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1094             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1095             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1096                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1097                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1098                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1099                                     (GRC_LCLCTRL_GPIO_OE0 |
1100                                      GRC_LCLCTRL_GPIO_OE1 |
1101                                      GRC_LCLCTRL_GPIO_OE2 |
1102                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1103                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1104                                     100);
1105                 } else {
1106                         u32 no_gpio2;
1107                         u32 grc_local_ctrl = 0;
1108
1109                         if (tp_peer != tp &&
1110                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1111                                 return;
1112
1113                         /* Workaround to prevent overdrawing Amps. */
1114                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1115                             ASIC_REV_5714) {
1116                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1117                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1118                                             grc_local_ctrl, 100);
1119                         }
1120
1121                         /* On 5753 and variants, GPIO2 cannot be used. */
1122                         no_gpio2 = tp->nic_sram_data_cfg &
1123                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1124
1125                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1126                                          GRC_LCLCTRL_GPIO_OE1 |
1127                                          GRC_LCLCTRL_GPIO_OE2 |
1128                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1129                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1130                         if (no_gpio2) {
1131                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1132                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1133                         }
1134                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1135                                                     grc_local_ctrl, 100);
1136
1137                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1138
1139                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1140                                                     grc_local_ctrl, 100);
1141
1142                         if (!no_gpio2) {
1143                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1144                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1145                                             grc_local_ctrl, 100);
1146                         }
1147                 }
1148         } else {
1149                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1150                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1151                         if (tp_peer != tp &&
1152                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1153                                 return;
1154
1155                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156                                     (GRC_LCLCTRL_GPIO_OE1 |
1157                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1158
1159                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1160                                     GRC_LCLCTRL_GPIO_OE1, 100);
1161
1162                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1163                                     (GRC_LCLCTRL_GPIO_OE1 |
1164                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1165                 }
1166         }
1167 }
1168
1169 static int tg3_setup_phy(struct tg3 *, int);
1170
1171 #define RESET_KIND_SHUTDOWN     0
1172 #define RESET_KIND_INIT         1
1173 #define RESET_KIND_SUSPEND      2
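/* These codes are written to firmware mailbox locations (see
 * tg3_write_sig_post_reset() and friends) so the on-chip ASF/management
 * firmware can tell whether the driver is initializing, suspending, or
 * shutting the device down.
 */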
1174
1175 static void tg3_write_sig_post_reset(struct tg3 *, int);
1176 static int tg3_halt_cpu(struct tg3 *, u32);
1177 static int tg3_nvram_lock(struct tg3 *);
1178 static void tg3_nvram_unlock(struct tg3 *);
1179
1180 static void tg3_power_down_phy(struct tg3 *tp)
1181 {
1182         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1183                 return;
1184
1185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1186                 u32 val;
1187
1188                 tg3_bmcr_reset(tp);
1189                 val = tr32(GRC_MISC_CFG);
1190                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1191                 udelay(40);
1192                 return;
1193         } else {
1194                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1195                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1196                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1197         }
1198
1199         /* The PHY should not be powered down on some chips because
1200          * of bugs.
1201          */
1202         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1203             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1204             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1205              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1206                 return;
1207         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1208 }
1209
1210 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1211 {
1212         u32 misc_host_ctrl;
1213         u16 power_control, power_caps;
1214         int pm = tp->pm_cap;
1215
1216         /* Make sure register accesses (indirect or otherwise)
1217          * will function correctly.
1218          */
1219         pci_write_config_dword(tp->pdev,
1220                                TG3PCI_MISC_HOST_CTRL,
1221                                tp->misc_host_ctrl);
1222
1223         pci_read_config_word(tp->pdev,
1224                              pm + PCI_PM_CTRL,
1225                              &power_control);
1226         power_control |= PCI_PM_CTRL_PME_STATUS;
1227         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1228         switch (state) {
1229         case PCI_D0:
1230                 power_control |= 0;
1231                 pci_write_config_word(tp->pdev,
1232                                       pm + PCI_PM_CTRL,
1233                                       power_control);
1234                 udelay(100);    /* Delay after power state change */
1235
1236                 /* Switch out of Vaux if it is a NIC */
1237                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1238                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1239
1240                 return 0;
1241
1242         case PCI_D1:
1243                 power_control |= 1;
1244                 break;
1245
1246         case PCI_D2:
1247                 power_control |= 2;
1248                 break;
1249
1250         case PCI_D3hot:
1251                 power_control |= 3;
1252                 break;
1253
1254         default:
1255                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1256                        "requested.\n",
1257                        tp->dev->name, state);
1258                 return -EINVAL;
1259         }
1260
1261         power_control |= PCI_PM_CTRL_PME_ENABLE;
1262
1263         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1264         tw32(TG3PCI_MISC_HOST_CTRL,
1265              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1266
1267         if (tp->link_config.phy_is_low_power == 0) {
1268                 tp->link_config.phy_is_low_power = 1;
1269                 tp->link_config.orig_speed = tp->link_config.speed;
1270                 tp->link_config.orig_duplex = tp->link_config.duplex;
1271                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1272         }
1273
1274         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1275                 tp->link_config.speed = SPEED_10;
1276                 tp->link_config.duplex = DUPLEX_HALF;
1277                 tp->link_config.autoneg = AUTONEG_ENABLE;
1278                 tg3_setup_phy(tp, 0);
1279         }
1280
1281         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1282                 u32 val;
1283
1284                 val = tr32(GRC_VCPU_EXT_CTRL);
1285                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1286         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1287                 int i;
1288                 u32 val;
1289
1290                 for (i = 0; i < 200; i++) {
1291                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1292                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1293                                 break;
1294                         msleep(1);
1295                 }
1296         }
1297         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1298                                              WOL_DRV_STATE_SHUTDOWN |
1299                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1300
1301         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1302
1303         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1304                 u32 mac_mode;
1305
1306                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1307                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1308                         udelay(40);
1309
1310                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1311                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1312                         else
1313                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1314
1315                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1316                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1317                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1318                 } else {
1319                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1320                 }
1321
1322                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1323                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1324
1325                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1326                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1327                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1328
1329                 tw32_f(MAC_MODE, mac_mode);
1330                 udelay(100);
1331
1332                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1333                 udelay(10);
1334         }
1335
1336         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1337             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1338              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1339                 u32 base_val;
1340
1341                 base_val = tp->pci_clock_ctrl;
1342                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1343                              CLOCK_CTRL_TXCLK_DISABLE);
1344
1345                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1346                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1347         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1348                 /* do nothing */
1349         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1350                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1351                 u32 newbits1, newbits2;
1352
1353                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1354                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1355                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1356                                     CLOCK_CTRL_TXCLK_DISABLE |
1357                                     CLOCK_CTRL_ALTCLK);
1358                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1359                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1360                         newbits1 = CLOCK_CTRL_625_CORE;
1361                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1362                 } else {
1363                         newbits1 = CLOCK_CTRL_ALTCLK;
1364                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1365                 }
1366
1367                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1368                             40);
1369
1370                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1371                             40);
1372
1373                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1374                         u32 newbits3;
1375
1376                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1377                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1378                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1379                                             CLOCK_CTRL_TXCLK_DISABLE |
1380                                             CLOCK_CTRL_44MHZ_CORE);
1381                         } else {
1382                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1383                         }
1384
1385                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1386                                     tp->pci_clock_ctrl | newbits3, 40);
1387                 }
1388         }
1389
1390         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1391             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1392                 tg3_power_down_phy(tp);
1393
1394         tg3_frob_aux_power(tp);
1395
1396         /* Workaround for unstable PLL clock */
1397         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1398             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1399                 u32 val = tr32(0x7d00);
1400
1401                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1402                 tw32(0x7d00, val);
1403                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1404                         int err;
1405
1406                         err = tg3_nvram_lock(tp);
1407                         tg3_halt_cpu(tp, RX_CPU_BASE);
1408                         if (!err)
1409                                 tg3_nvram_unlock(tp);
1410                 }
1411         }
1412
1413         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1414
1415         /* Finally, set the new power state. */
1416         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1417         udelay(100);    /* Delay after power state change */
1418
1419         return 0;
1420 }
1421
1422 static void tg3_link_report(struct tg3 *tp)
1423 {
1424         if (!netif_carrier_ok(tp->dev)) {
1425                 if (netif_msg_link(tp))
1426                         printk(KERN_INFO PFX "%s: Link is down.\n",
1427                                tp->dev->name);
1428         } else if (netif_msg_link(tp)) {
1429                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1430                        tp->dev->name,
1431                        (tp->link_config.active_speed == SPEED_1000 ?
1432                         1000 :
1433                         (tp->link_config.active_speed == SPEED_100 ?
1434                          100 : 10)),
1435                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1436                         "full" : "half"));
1437
1438                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1439                        "%s for RX.\n",
1440                        tp->dev->name,
1441                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1442                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1443         }
1444 }
1445
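/* Resolve TX/RX flow control from the local and remote pause
 * advertisements when pause autonegotiation is enabled (1000BASE-X
 * pause bits are first converted to the 1000BASE-T layout for MII
 * SerDes devices); otherwise the currently configured flags are kept.
 *
 *   local PAUSE        + remote PAUSE        -> RX and TX pause
 *   local PAUSE + ASYM + remote ASYM only    -> RX pause only
 *   local ASYM only    + remote PAUSE + ASYM -> TX pause only
 *
 * The result is then mirrored into MAC_RX_MODE and MAC_TX_MODE.
 */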
1446 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1447 {
1448         u32 new_tg3_flags = 0;
1449         u32 old_rx_mode = tp->rx_mode;
1450         u32 old_tx_mode = tp->tx_mode;
1451
1452         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1453
1454                 /* Convert 1000BaseX flow control bits to 1000BaseT
1455                  * bits before resolving flow control.
1456                  */
1457                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1458                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1459                                        ADVERTISE_PAUSE_ASYM);
1460                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1461
1462                         if (local_adv & ADVERTISE_1000XPAUSE)
1463                                 local_adv |= ADVERTISE_PAUSE_CAP;
1464                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1465                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1466                         if (remote_adv & LPA_1000XPAUSE)
1467                                 remote_adv |= LPA_PAUSE_CAP;
1468                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1469                                 remote_adv |= LPA_PAUSE_ASYM;
1470                 }
1471
1472                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1473                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1474                                 if (remote_adv & LPA_PAUSE_CAP)
1475                                         new_tg3_flags |=
1476                                                 (TG3_FLAG_RX_PAUSE |
1477                                                 TG3_FLAG_TX_PAUSE);
1478                                 else if (remote_adv & LPA_PAUSE_ASYM)
1479                                         new_tg3_flags |=
1480                                                 (TG3_FLAG_RX_PAUSE);
1481                         } else {
1482                                 if (remote_adv & LPA_PAUSE_CAP)
1483                                         new_tg3_flags |=
1484                                                 (TG3_FLAG_RX_PAUSE |
1485                                                 TG3_FLAG_TX_PAUSE);
1486                         }
1487                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1488                         if ((remote_adv & LPA_PAUSE_CAP) &&
1489                             (remote_adv & LPA_PAUSE_ASYM))
1490                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1491                 }
1492
1493                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1494                 tp->tg3_flags |= new_tg3_flags;
1495         } else {
1496                 new_tg3_flags = tp->tg3_flags;
1497         }
1498
1499         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1500                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1501         else
1502                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1503
1504         if (old_rx_mode != tp->rx_mode) {
1505                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1506         }
1507
1508         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1509                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1510         else
1511                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1512
1513         if (old_tx_mode != tp->tx_mode) {
1514                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1515         }
1516 }
1517
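/* Decode the speed/duplex field of the PHY auxiliary status register
 * into the generic speed and duplex constants.  The 5906 encodes its
 * 10/100 status differently and is handled in the default case.
 */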
1518 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1519 {
1520         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1521         case MII_TG3_AUX_STAT_10HALF:
1522                 *speed = SPEED_10;
1523                 *duplex = DUPLEX_HALF;
1524                 break;
1525
1526         case MII_TG3_AUX_STAT_10FULL:
1527                 *speed = SPEED_10;
1528                 *duplex = DUPLEX_FULL;
1529                 break;
1530
1531         case MII_TG3_AUX_STAT_100HALF:
1532                 *speed = SPEED_100;
1533                 *duplex = DUPLEX_HALF;
1534                 break;
1535
1536         case MII_TG3_AUX_STAT_100FULL:
1537                 *speed = SPEED_100;
1538                 *duplex = DUPLEX_FULL;
1539                 break;
1540
1541         case MII_TG3_AUX_STAT_1000HALF:
1542                 *speed = SPEED_1000;
1543                 *duplex = DUPLEX_HALF;
1544                 break;
1545
1546         case MII_TG3_AUX_STAT_1000FULL:
1547                 *speed = SPEED_1000;
1548                 *duplex = DUPLEX_FULL;
1549                 break;
1550
1551         default:
1552                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1553                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1554                                  SPEED_10;
1555                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1556                                   DUPLEX_HALF;
1557                         break;
1558                 }
1559                 *speed = SPEED_INVALID;
1560                 *duplex = DUPLEX_INVALID;
1561                 break;
1562         }
1563 }
1564
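/* Program the copper PHY for the requested link configuration:
 * restrict advertisements to 10/100 in low-power mode, advertise the
 * configured modes when autonegotiating, or force speed/duplex through
 * BMCR when autonegotiation is disabled.
 */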
1565 static void tg3_phy_copper_begin(struct tg3 *tp)
1566 {
1567         u32 new_adv;
1568         int i;
1569
1570         if (tp->link_config.phy_is_low_power) {
1571                 /* Entering low power mode.  Disable gigabit and
1572                  * 100baseT advertisements.
1573                  */
1574                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1575
1576                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1577                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1578                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1579                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1580
1581                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1582         } else if (tp->link_config.speed == SPEED_INVALID) {
1583                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1584                         tp->link_config.advertising &=
1585                                 ~(ADVERTISED_1000baseT_Half |
1586                                   ADVERTISED_1000baseT_Full);
1587
1588                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1589                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1590                         new_adv |= ADVERTISE_10HALF;
1591                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1592                         new_adv |= ADVERTISE_10FULL;
1593                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1594                         new_adv |= ADVERTISE_100HALF;
1595                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1596                         new_adv |= ADVERTISE_100FULL;
1597                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1598
1599                 if (tp->link_config.advertising &
1600                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1601                         new_adv = 0;
1602                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1603                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1604                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1605                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1606                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1607                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1608                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1609                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1610                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1611                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1612                 } else {
1613                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1614                 }
1615         } else {
1616                 /* Asking for a specific link mode. */
1617                 if (tp->link_config.speed == SPEED_1000) {
1618                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1619                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1620
1621                         if (tp->link_config.duplex == DUPLEX_FULL)
1622                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1623                         else
1624                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1625                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1626                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1627                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1628                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1629                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1630                 } else {
1631                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1632
1633                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1634                         if (tp->link_config.speed == SPEED_100) {
1635                                 if (tp->link_config.duplex == DUPLEX_FULL)
1636                                         new_adv |= ADVERTISE_100FULL;
1637                                 else
1638                                         new_adv |= ADVERTISE_100HALF;
1639                         } else {
1640                                 if (tp->link_config.duplex == DUPLEX_FULL)
1641                                         new_adv |= ADVERTISE_10FULL;
1642                                 else
1643                                         new_adv |= ADVERTISE_10HALF;
1644                         }
1645                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1646                 }
1647         }
1648
1649         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1650             tp->link_config.speed != SPEED_INVALID) {
1651                 u32 bmcr, orig_bmcr;
1652
1653                 tp->link_config.active_speed = tp->link_config.speed;
1654                 tp->link_config.active_duplex = tp->link_config.duplex;
1655
1656                 bmcr = 0;
1657                 switch (tp->link_config.speed) {
1658                 default:
1659                 case SPEED_10:
1660                         break;
1661
1662                 case SPEED_100:
1663                         bmcr |= BMCR_SPEED100;
1664                         break;
1665
1666                 case SPEED_1000:
1667                         bmcr |= TG3_BMCR_SPEED1000;
1668                         break;
1669                 }
1670
1671                 if (tp->link_config.duplex == DUPLEX_FULL)
1672                         bmcr |= BMCR_FULLDPLX;
1673
1674                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1675                     (bmcr != orig_bmcr)) {
1676                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1677                         for (i = 0; i < 1500; i++) {
1678                                 u32 tmp;
1679
1680                                 udelay(10);
1681                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1682                                     tg3_readphy(tp, MII_BMSR, &tmp))
1683                                         continue;
1684                                 if (!(tmp & BMSR_LSTATUS)) {
1685                                         udelay(40);
1686                                         break;
1687                                 }
1688                         }
1689                         tg3_writephy(tp, MII_BMCR, bmcr);
1690                         udelay(40);
1691                 }
1692         } else {
1693                 tg3_writephy(tp, MII_BMCR,
1694                              BMCR_ANENABLE | BMCR_ANRESTART);
1695         }
1696 }
1697
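/* Load a fixed sequence of DSP coefficient writes into the BCM5401 PHY
 * through the DSP address/data registers (tap power management off,
 * extended packet length enabled).
 */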
1698 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1699 {
1700         int err;
1701
1702         /* Turn off tap power management and set the
1703          * extended packet length bit.  */
1704         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1705
1706         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1707         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1708
1709         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1710         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1711
1712         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1713         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1714
1715         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1716         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1717
1718         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1719         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1720
1721         udelay(40);
1722
1723         return err;
1724 }
1725
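/* Return 1 if every mode requested in @mask is currently being
 * advertised by the PHY (MII_ADVERTISE, plus MII_TG3_CTRL for the
 * gigabit modes), 0 otherwise.
 */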
1726 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1727 {
1728         u32 adv_reg, all_mask = 0;
1729
1730         if (mask & ADVERTISED_10baseT_Half)
1731                 all_mask |= ADVERTISE_10HALF;
1732         if (mask & ADVERTISED_10baseT_Full)
1733                 all_mask |= ADVERTISE_10FULL;
1734         if (mask & ADVERTISED_100baseT_Half)
1735                 all_mask |= ADVERTISE_100HALF;
1736         if (mask & ADVERTISED_100baseT_Full)
1737                 all_mask |= ADVERTISE_100FULL;
1738
1739         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1740                 return 0;
1741
1742         if ((adv_reg & all_mask) != all_mask)
1743                 return 0;
1744         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1745                 u32 tg3_ctrl;
1746
1747                 all_mask = 0;
1748                 if (mask & ADVERTISED_1000baseT_Half)
1749                         all_mask |= ADVERTISE_1000HALF;
1750                 if (mask & ADVERTISED_1000baseT_Full)
1751                         all_mask |= ADVERTISE_1000FULL;
1752
1753                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1754                         return 0;
1755
1756                 if ((tg3_ctrl & all_mask) != all_mask)
1757                         return 0;
1758         }
1759         return 1;
1760 }
1761
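/* (Re)establish the copper link: apply PHY-specific workarounds, read
 * the negotiated speed/duplex from the PHY, resolve flow control,
 * reprogram MAC_MODE accordingly and report any carrier change.
 */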
1762 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1763 {
1764         int current_link_up;
1765         u32 bmsr, dummy;
1766         u16 current_speed;
1767         u8 current_duplex;
1768         int i, err;
1769
1770         tw32(MAC_EVENT, 0);
1771
1772         tw32_f(MAC_STATUS,
1773              (MAC_STATUS_SYNC_CHANGED |
1774               MAC_STATUS_CFG_CHANGED |
1775               MAC_STATUS_MI_COMPLETION |
1776               MAC_STATUS_LNKSTATE_CHANGED));
1777         udelay(40);
1778
1779         tp->mi_mode = MAC_MI_MODE_BASE;
1780         tw32_f(MAC_MI_MODE, tp->mi_mode);
1781         udelay(80);
1782
1783         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1784
1785         /* Some third-party PHYs need to be reset on link going
1786          * down.
1787          */
1788         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1789              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1790              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1791             netif_carrier_ok(tp->dev)) {
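                /* BMSR latches link failures; read it twice so that the
                 * second read reflects the current link state.
                 */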
1792                 tg3_readphy(tp, MII_BMSR, &bmsr);
1793                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1794                     !(bmsr & BMSR_LSTATUS))
1795                         force_reset = 1;
1796         }
1797         if (force_reset)
1798                 tg3_phy_reset(tp);
1799
1800         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1801                 tg3_readphy(tp, MII_BMSR, &bmsr);
1802                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1803                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1804                         bmsr = 0;
1805
1806                 if (!(bmsr & BMSR_LSTATUS)) {
1807                         err = tg3_init_5401phy_dsp(tp);
1808                         if (err)
1809                                 return err;
1810
1811                         tg3_readphy(tp, MII_BMSR, &bmsr);
1812                         for (i = 0; i < 1000; i++) {
1813                                 udelay(10);
1814                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1815                                     (bmsr & BMSR_LSTATUS)) {
1816                                         udelay(40);
1817                                         break;
1818                                 }
1819                         }
1820
1821                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1822                             !(bmsr & BMSR_LSTATUS) &&
1823                             tp->link_config.active_speed == SPEED_1000) {
1824                                 err = tg3_phy_reset(tp);
1825                                 if (!err)
1826                                         err = tg3_init_5401phy_dsp(tp);
1827                                 if (err)
1828                                         return err;
1829                         }
1830                 }
1831         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1832                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1833                 /* 5701 {A0,B0} CRC bug workaround */
1834                 tg3_writephy(tp, 0x15, 0x0a75);
1835                 tg3_writephy(tp, 0x1c, 0x8c68);
1836                 tg3_writephy(tp, 0x1c, 0x8d68);
1837                 tg3_writephy(tp, 0x1c, 0x8c68);
1838         }
1839
1840         /* Clear pending interrupts... */
1841         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1842         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1843
1844         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1845                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1846         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1847                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1848
1849         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1850             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1851                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1852                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1853                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1854                 else
1855                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1856         }
1857
1858         current_link_up = 0;
1859         current_speed = SPEED_INVALID;
1860         current_duplex = DUPLEX_INVALID;
1861
1862         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1863                 u32 val;
1864
1865                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1866                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1867                 if (!(val & (1 << 10))) {
1868                         val |= (1 << 10);
1869                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1870                         goto relink;
1871                 }
1872         }
1873
1874         bmsr = 0;
1875         for (i = 0; i < 100; i++) {
1876                 tg3_readphy(tp, MII_BMSR, &bmsr);
1877                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1878                     (bmsr & BMSR_LSTATUS))
1879                         break;
1880                 udelay(40);
1881         }
1882
1883         if (bmsr & BMSR_LSTATUS) {
1884                 u32 aux_stat, bmcr;
1885
1886                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1887                 for (i = 0; i < 2000; i++) {
1888                         udelay(10);
1889                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1890                             aux_stat)
1891                                 break;
1892                 }
1893
1894                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1895                                              &current_speed,
1896                                              &current_duplex);
1897
1898                 bmcr = 0;
1899                 for (i = 0; i < 200; i++) {
1900                         tg3_readphy(tp, MII_BMCR, &bmcr);
1901                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1902                                 continue;
1903                         if (bmcr && bmcr != 0x7fff)
1904                                 break;
1905                         udelay(10);
1906                 }
1907
1908                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1909                         if (bmcr & BMCR_ANENABLE) {
1910                                 current_link_up = 1;
1911
1912                                 /* Force autoneg restart if we are exiting
1913                                  * low power mode.
1914                                  */
1915                                 if (!tg3_copper_is_advertising_all(tp,
1916                                                 tp->link_config.advertising))
1917                                         current_link_up = 0;
1918                         } else {
1919                                 current_link_up = 0;
1920                         }
1921                 } else {
1922                         if (!(bmcr & BMCR_ANENABLE) &&
1923                             tp->link_config.speed == current_speed &&
1924                             tp->link_config.duplex == current_duplex) {
1925                                 current_link_up = 1;
1926                         } else {
1927                                 current_link_up = 0;
1928                         }
1929                 }
1930
1931                 tp->link_config.active_speed = current_speed;
1932                 tp->link_config.active_duplex = current_duplex;
1933         }
1934
1935         if (current_link_up == 1 &&
1936             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1937             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1938                 u32 local_adv, remote_adv;
1939
1940                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1941                         local_adv = 0;
1942                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1943
1944                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1945                         remote_adv = 0;
1946
1947                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1948
1949                 /* If we are not advertising full pause capability,
1950                  * something is wrong.  Bring the link down and reconfigure.
1951                  */
1952                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1953                         current_link_up = 0;
1954                 } else {
1955                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1956                 }
1957         }
1958 relink:
1959         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1960                 u32 tmp;
1961
1962                 tg3_phy_copper_begin(tp);
1963
1964                 tg3_readphy(tp, MII_BMSR, &tmp);
1965                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1966                     (tmp & BMSR_LSTATUS))
1967                         current_link_up = 1;
1968         }
1969
1970         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1971         if (current_link_up == 1) {
1972                 if (tp->link_config.active_speed == SPEED_100 ||
1973                     tp->link_config.active_speed == SPEED_10)
1974                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1975                 else
1976                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1977         } else
1978                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1979
1980         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1981         if (tp->link_config.active_duplex == DUPLEX_HALF)
1982                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1983
1984         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1985         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1986                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1987                     (current_link_up == 1 &&
1988                      tp->link_config.active_speed == SPEED_10))
1989                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1990         } else {
1991                 if (current_link_up == 1)
1992                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1993         }
1994
1995         /* ??? Without this setting Netgear GA302T PHY does not
1996          * ??? send/receive packets...
1997          */
1998         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1999             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2000                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2001                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2002                 udelay(80);
2003         }
2004
2005         tw32_f(MAC_MODE, tp->mac_mode);
2006         udelay(40);
2007
2008         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2009                 /* Polled via timer. */
2010                 tw32_f(MAC_EVENT, 0);
2011         } else {
2012                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2013         }
2014         udelay(40);
2015
2016         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2017             current_link_up == 1 &&
2018             tp->link_config.active_speed == SPEED_1000 &&
2019             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2020              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2021                 udelay(120);
2022                 tw32_f(MAC_STATUS,
2023                      (MAC_STATUS_SYNC_CHANGED |
2024                       MAC_STATUS_CFG_CHANGED));
2025                 udelay(40);
2026                 tg3_write_mem(tp,
2027                               NIC_SRAM_FIRMWARE_MBOX,
2028                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2029         }
2030
2031         if (current_link_up != netif_carrier_ok(tp->dev)) {
2032                 if (current_link_up)
2033                         netif_carrier_on(tp->dev);
2034                 else
2035                         netif_carrier_off(tp->dev);
2036                 tg3_link_report(tp);
2037         }
2038
2039         return 0;
2040 }
2041
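/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine() below.
 */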
2042 struct tg3_fiber_aneginfo {
2043         int state;
2044 #define ANEG_STATE_UNKNOWN              0
2045 #define ANEG_STATE_AN_ENABLE            1
2046 #define ANEG_STATE_RESTART_INIT         2
2047 #define ANEG_STATE_RESTART              3
2048 #define ANEG_STATE_DISABLE_LINK_OK      4
2049 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2050 #define ANEG_STATE_ABILITY_DETECT       6
2051 #define ANEG_STATE_ACK_DETECT_INIT      7
2052 #define ANEG_STATE_ACK_DETECT           8
2053 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2054 #define ANEG_STATE_COMPLETE_ACK         10
2055 #define ANEG_STATE_IDLE_DETECT_INIT     11
2056 #define ANEG_STATE_IDLE_DETECT          12
2057 #define ANEG_STATE_LINK_OK              13
2058 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2059 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2060
2061         u32 flags;
2062 #define MR_AN_ENABLE            0x00000001
2063 #define MR_RESTART_AN           0x00000002
2064 #define MR_AN_COMPLETE          0x00000004
2065 #define MR_PAGE_RX              0x00000008
2066 #define MR_NP_LOADED            0x00000010
2067 #define MR_TOGGLE_TX            0x00000020
2068 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2069 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2070 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2071 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2072 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2073 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2074 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2075 #define MR_TOGGLE_RX            0x00002000
2076 #define MR_NP_RX                0x00004000
2077
2078 #define MR_LINK_OK              0x80000000
2079
2080         unsigned long link_time, cur_time;
2081
2082         u32 ability_match_cfg;
2083         int ability_match_count;
2084
2085         char ability_match, idle_match, ack_match;
2086
2087         u32 txconfig, rxconfig;
2088 #define ANEG_CFG_NP             0x00000080
2089 #define ANEG_CFG_ACK            0x00000040
2090 #define ANEG_CFG_RF2            0x00000020
2091 #define ANEG_CFG_RF1            0x00000010
2092 #define ANEG_CFG_PS2            0x00000001
2093 #define ANEG_CFG_PS1            0x00008000
2094 #define ANEG_CFG_HD             0x00004000
2095 #define ANEG_CFG_FD             0x00002000
2096 #define ANEG_CFG_INVAL          0x00001f06
2097
2098 };
2099 #define ANEG_OK         0
2100 #define ANEG_DONE       1
2101 #define ANEG_TIMER_ENAB 2
2102 #define ANEG_FAILED     -1
2103
2104 #define ANEG_STATE_SETTLE_TIME  10000
2105
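/* Run one step of the software Clause 37 style autonegotiation state
 * machine using the MAC's TX/RX auto-neg configuration registers.
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */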
2106 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2107                                    struct tg3_fiber_aneginfo *ap)
2108 {
2109         unsigned long delta;
2110         u32 rx_cfg_reg;
2111         int ret;
2112
2113         if (ap->state == ANEG_STATE_UNKNOWN) {
2114                 ap->rxconfig = 0;
2115                 ap->link_time = 0;
2116                 ap->cur_time = 0;
2117                 ap->ability_match_cfg = 0;
2118                 ap->ability_match_count = 0;
2119                 ap->ability_match = 0;
2120                 ap->idle_match = 0;
2121                 ap->ack_match = 0;
2122         }
2123         ap->cur_time++;
2124
2125         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2126                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2127
2128                 if (rx_cfg_reg != ap->ability_match_cfg) {
2129                         ap->ability_match_cfg = rx_cfg_reg;
2130                         ap->ability_match = 0;
2131                         ap->ability_match_count = 0;
2132                 } else {
2133                         if (++ap->ability_match_count > 1) {
2134                                 ap->ability_match = 1;
2135                                 ap->ability_match_cfg = rx_cfg_reg;
2136                         }
2137                 }
2138                 if (rx_cfg_reg & ANEG_CFG_ACK)
2139                         ap->ack_match = 1;
2140                 else
2141                         ap->ack_match = 0;
2142
2143                 ap->idle_match = 0;
2144         } else {
2145                 ap->idle_match = 1;
2146                 ap->ability_match_cfg = 0;
2147                 ap->ability_match_count = 0;
2148                 ap->ability_match = 0;
2149                 ap->ack_match = 0;
2150
2151                 rx_cfg_reg = 0;
2152         }
2153
2154         ap->rxconfig = rx_cfg_reg;
2155         ret = ANEG_OK;
2156
2157         switch (ap->state) {
2158         case ANEG_STATE_UNKNOWN:
2159                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2160                         ap->state = ANEG_STATE_AN_ENABLE;
2161
2162                 /* fallthru */
2163         case ANEG_STATE_AN_ENABLE:
2164                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2165                 if (ap->flags & MR_AN_ENABLE) {
2166                         ap->link_time = 0;
2167                         ap->cur_time = 0;
2168                         ap->ability_match_cfg = 0;
2169                         ap->ability_match_count = 0;
2170                         ap->ability_match = 0;
2171                         ap->idle_match = 0;
2172                         ap->ack_match = 0;
2173
2174                         ap->state = ANEG_STATE_RESTART_INIT;
2175                 } else {
2176                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2177                 }
2178                 break;
2179
2180         case ANEG_STATE_RESTART_INIT:
2181                 ap->link_time = ap->cur_time;
2182                 ap->flags &= ~(MR_NP_LOADED);
2183                 ap->txconfig = 0;
2184                 tw32(MAC_TX_AUTO_NEG, 0);
2185                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2186                 tw32_f(MAC_MODE, tp->mac_mode);
2187                 udelay(40);
2188
2189                 ret = ANEG_TIMER_ENAB;
2190                 ap->state = ANEG_STATE_RESTART;
2191
2192                 /* fallthru */
2193         case ANEG_STATE_RESTART:
2194                 delta = ap->cur_time - ap->link_time;
2195                 if (delta > ANEG_STATE_SETTLE_TIME) {
2196                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2197                 } else {
2198                         ret = ANEG_TIMER_ENAB;
2199                 }
2200                 break;
2201
2202         case ANEG_STATE_DISABLE_LINK_OK:
2203                 ret = ANEG_DONE;
2204                 break;
2205
2206         case ANEG_STATE_ABILITY_DETECT_INIT:
2207                 ap->flags &= ~(MR_TOGGLE_TX);
2208                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2209                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2210                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2211                 tw32_f(MAC_MODE, tp->mac_mode);
2212                 udelay(40);
2213
2214                 ap->state = ANEG_STATE_ABILITY_DETECT;
2215                 break;
2216
2217         case ANEG_STATE_ABILITY_DETECT:
2218                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2219                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2220                 }
2221                 break;
2222
2223         case ANEG_STATE_ACK_DETECT_INIT:
2224                 ap->txconfig |= ANEG_CFG_ACK;
2225                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2226                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2227                 tw32_f(MAC_MODE, tp->mac_mode);
2228                 udelay(40);
2229
2230                 ap->state = ANEG_STATE_ACK_DETECT;
2231
2232                 /* fallthru */
2233         case ANEG_STATE_ACK_DETECT:
2234                 if (ap->ack_match != 0) {
2235                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2236                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2237                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2238                         } else {
2239                                 ap->state = ANEG_STATE_AN_ENABLE;
2240                         }
2241                 } else if (ap->ability_match != 0 &&
2242                            ap->rxconfig == 0) {
2243                         ap->state = ANEG_STATE_AN_ENABLE;
2244                 }
2245                 break;
2246
2247         case ANEG_STATE_COMPLETE_ACK_INIT:
2248                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2249                         ret = ANEG_FAILED;
2250                         break;
2251                 }
2252                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2253                                MR_LP_ADV_HALF_DUPLEX |
2254                                MR_LP_ADV_SYM_PAUSE |
2255                                MR_LP_ADV_ASYM_PAUSE |
2256                                MR_LP_ADV_REMOTE_FAULT1 |
2257                                MR_LP_ADV_REMOTE_FAULT2 |
2258                                MR_LP_ADV_NEXT_PAGE |
2259                                MR_TOGGLE_RX |
2260                                MR_NP_RX);
2261                 if (ap->rxconfig & ANEG_CFG_FD)
2262                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2263                 if (ap->rxconfig & ANEG_CFG_HD)
2264                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2265                 if (ap->rxconfig & ANEG_CFG_PS1)
2266                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2267                 if (ap->rxconfig & ANEG_CFG_PS2)
2268                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2269                 if (ap->rxconfig & ANEG_CFG_RF1)
2270                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2271                 if (ap->rxconfig & ANEG_CFG_RF2)
2272                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2273                 if (ap->rxconfig & ANEG_CFG_NP)
2274                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2275
2276                 ap->link_time = ap->cur_time;
2277
2278                 ap->flags ^= (MR_TOGGLE_TX);
2279                 if (ap->rxconfig & 0x0008)
2280                         ap->flags |= MR_TOGGLE_RX;
2281                 if (ap->rxconfig & ANEG_CFG_NP)
2282                         ap->flags |= MR_NP_RX;
2283                 ap->flags |= MR_PAGE_RX;
2284
2285                 ap->state = ANEG_STATE_COMPLETE_ACK;
2286                 ret = ANEG_TIMER_ENAB;
2287                 break;
2288
2289         case ANEG_STATE_COMPLETE_ACK:
2290                 if (ap->ability_match != 0 &&
2291                     ap->rxconfig == 0) {
2292                         ap->state = ANEG_STATE_AN_ENABLE;
2293                         break;
2294                 }
2295                 delta = ap->cur_time - ap->link_time;
2296                 if (delta > ANEG_STATE_SETTLE_TIME) {
2297                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2298                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2299                         } else {
2300                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2301                                     !(ap->flags & MR_NP_RX)) {
2302                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2303                                 } else {
2304                                         ret = ANEG_FAILED;
2305                                 }
2306                         }
2307                 }
2308                 break;
2309
2310         case ANEG_STATE_IDLE_DETECT_INIT:
2311                 ap->link_time = ap->cur_time;
2312                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2313                 tw32_f(MAC_MODE, tp->mac_mode);
2314                 udelay(40);
2315
2316                 ap->state = ANEG_STATE_IDLE_DETECT;
2317                 ret = ANEG_TIMER_ENAB;
2318                 break;
2319
2320         case ANEG_STATE_IDLE_DETECT:
2321                 if (ap->ability_match != 0 &&
2322                     ap->rxconfig == 0) {
2323                         ap->state = ANEG_STATE_AN_ENABLE;
2324                         break;
2325                 }
2326                 delta = ap->cur_time - ap->link_time;
2327                 if (delta > ANEG_STATE_SETTLE_TIME) {
2328                         /* XXX another gem from the Broadcom driver :( */
2329                         ap->state = ANEG_STATE_LINK_OK;
2330                 }
2331                 break;
2332
2333         case ANEG_STATE_LINK_OK:
2334                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2335                 ret = ANEG_DONE;
2336                 break;
2337
2338         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2339                 /* ??? unimplemented */
2340                 break;
2341
2342         case ANEG_STATE_NEXT_PAGE_WAIT:
2343                 /* ??? unimplemented */
2344                 break;
2345
2346         default:
2347                 ret = ANEG_FAILED;
2348                 break;
2349         }
2350
2351         return ret;
2352 }
2353
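/* Drive the software autoneg state machine to completion and return
 * nonzero on success; *flags receives the resolved MR_* ability flags.
 */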
2354 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2355 {
2356         int res = 0;
2357         struct tg3_fiber_aneginfo aninfo;
2358         int status = ANEG_FAILED;
2359         unsigned int tick;
2360         u32 tmp;
2361
2362         tw32_f(MAC_TX_AUTO_NEG, 0);
2363
2364         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2365         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2366         udelay(40);
2367
2368         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2369         udelay(40);
2370
2371         memset(&aninfo, 0, sizeof(aninfo));
2372         aninfo.flags |= MR_AN_ENABLE;
2373         aninfo.state = ANEG_STATE_UNKNOWN;
2374         aninfo.cur_time = 0;
2375         tick = 0;
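        /* Step the state machine roughly once per microsecond, for a
         * total budget of about 195 ms, before giving up.
         */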
2376         while (++tick < 195000) {
2377                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2378                 if (status == ANEG_DONE || status == ANEG_FAILED)
2379                         break;
2380
2381                 udelay(1);
2382         }
2383
2384         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2385         tw32_f(MAC_MODE, tp->mac_mode);
2386         udelay(40);
2387
2388         *flags = aninfo.flags;
2389
2390         if (status == ANEG_DONE &&
2391             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2392                              MR_LP_ADV_FULL_DUPLEX)))
2393                 res = 1;
2394
2395         return res;
2396 }
2397
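/* Initialize the BCM8002 SerDes PHY with a fixed register write
 * sequence (PLL lock range, soft reset, POR toggle, channel register
 * selection).
 */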
2398 static void tg3_init_bcm8002(struct tg3 *tp)
2399 {
2400         u32 mac_status = tr32(MAC_STATUS);
2401         int i;
2402
2403         /* Reset when initializing for the first time, or when we have a link. */
2404         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2405             !(mac_status & MAC_STATUS_PCS_SYNCED))
2406                 return;
2407
2408         /* Set PLL lock range. */
2409         tg3_writephy(tp, 0x16, 0x8007);
2410
2411         /* SW reset */
2412         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2413
2414         /* Wait for reset to complete. */
2415         /* XXX schedule_timeout() ... */
2416         for (i = 0; i < 500; i++)
2417                 udelay(10);
2418
2419         /* Config mode; select PMA/Ch 1 regs. */
2420         tg3_writephy(tp, 0x10, 0x8411);
2421
2422         /* Enable auto-lock and comdet, select txclk for tx. */
2423         tg3_writephy(tp, 0x11, 0x0a10);
2424
2425         tg3_writephy(tp, 0x18, 0x00a0);
2426         tg3_writephy(tp, 0x16, 0x41ff);
2427
2428         /* Assert and deassert POR. */
2429         tg3_writephy(tp, 0x13, 0x0400);
2430         udelay(40);
2431         tg3_writephy(tp, 0x13, 0x0000);
2432
2433         tg3_writephy(tp, 0x11, 0x0a50);
2434         udelay(40);
2435         tg3_writephy(tp, 0x11, 0x0a10);
2436
2437         /* Wait for signal to stabilize */
2438         /* XXX schedule_timeout() ... */
2439         for (i = 0; i < 15000; i++)
2440                 udelay(10);
2441
2442         /* Deselect the channel register so we can read the PHYID
2443          * later.
2444          */
2445         tg3_writephy(tp, 0x10, 0x8011);
2446 }
2447
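/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Applies the MAC_SERDES_CFG workaround where required, falls back to
 * parallel detection when the partner sends no config words, and
 * returns nonzero when the link is up.
 */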
2448 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2449 {
2450         u32 sg_dig_ctrl, sg_dig_status;
2451         u32 serdes_cfg, expected_sg_dig_ctrl;
2452         int workaround, port_a;
2453         int current_link_up;
2454
2455         serdes_cfg = 0;
2456         expected_sg_dig_ctrl = 0;
2457         workaround = 0;
2458         port_a = 1;
2459         current_link_up = 0;
2460
2461         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2462             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2463                 workaround = 1;
2464                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2465                         port_a = 0;
2466
2467                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2468                 /* preserve bits 20-23 for voltage regulator */
2469                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2470         }
2471
2472         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2473
2474         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2475                 if (sg_dig_ctrl & (1 << 31)) {
2476                         if (workaround) {
2477                                 u32 val = serdes_cfg;
2478
2479                                 if (port_a)
2480                                         val |= 0xc010000;
2481                                 else
2482                                         val |= 0x4010000;
2483                                 tw32_f(MAC_SERDES_CFG, val);
2484                         }
2485                         tw32_f(SG_DIG_CTRL, 0x01388400);
2486                 }
2487                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2488                         tg3_setup_flow_control(tp, 0, 0);
2489                         current_link_up = 1;
2490                 }
2491                 goto out;
2492         }
2493
2494         /* Want auto-negotiation.  */
2495         expected_sg_dig_ctrl = 0x81388400;
2496
2497         /* Pause capability */
2498         expected_sg_dig_ctrl |= (1 << 11);
2499
2500         /* Asymmetric pause */
2501         expected_sg_dig_ctrl |= (1 << 12);
2502
2503         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2504                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2505                     tp->serdes_counter &&
2506                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2507                                     MAC_STATUS_RCVD_CFG)) ==
2508                      MAC_STATUS_PCS_SYNCED)) {
2509                         tp->serdes_counter--;
2510                         current_link_up = 1;
2511                         goto out;
2512                 }
2513 restart_autoneg:
2514                 if (workaround)
2515                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2516                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2517                 udelay(5);
2518                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2519
2520                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2521                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2522         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2523                                  MAC_STATUS_SIGNAL_DET)) {
2524                 sg_dig_status = tr32(SG_DIG_STATUS);
2525                 mac_status = tr32(MAC_STATUS);
2526
2527                 if ((sg_dig_status & (1 << 1)) &&
2528                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2529                         u32 local_adv, remote_adv;
2530
2531                         local_adv = ADVERTISE_PAUSE_CAP;
2532                         remote_adv = 0;
2533                         if (sg_dig_status & (1 << 19))
2534                                 remote_adv |= LPA_PAUSE_CAP;
2535                         if (sg_dig_status & (1 << 20))
2536                                 remote_adv |= LPA_PAUSE_ASYM;
2537
2538                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2539                         current_link_up = 1;
2540                         tp->serdes_counter = 0;
2541                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2542                 } else if (!(sg_dig_status & (1 << 1))) {
2543                         if (tp->serdes_counter)
2544                                 tp->serdes_counter--;
2545                         else {
2546                                 if (workaround) {
2547                                         u32 val = serdes_cfg;
2548
2549                                         if (port_a)
2550                                                 val |= 0xc010000;
2551                                         else
2552                                                 val |= 0x4010000;
2553
2554                                         tw32_f(MAC_SERDES_CFG, val);
2555                                 }
2556
2557                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2558                                 udelay(40);
2559
2560                                 /* Link parallel detection - link is up
2561                                  * only if we have PCS_SYNC and not
2562                                  * receiving config code words.  */
2563                                 mac_status = tr32(MAC_STATUS);
2564                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2565                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2566                                         tg3_setup_flow_control(tp, 0, 0);
2567                                         current_link_up = 1;
2568                                         tp->tg3_flags2 |=
2569                                                 TG3_FLG2_PARALLEL_DETECT;
2570                                         tp->serdes_counter =
2571                                                 SERDES_PARALLEL_DET_TIMEOUT;
2572                                 } else
2573                                         goto restart_autoneg;
2574                         }
2575                 }
2576         } else {
2577                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2578                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2579         }
2580
2581 out:
2582         return current_link_up;
2583 }
2584
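/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine when autonegotiation is enabled, otherwise force a
 * 1000 Mbps full-duplex link.  Returns nonzero when the link is up.
 */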
2585 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2586 {
2587         int current_link_up = 0;
2588
2589         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2590                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2591                 goto out;
2592         }
2593
2594         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2595                 u32 flags;
2596                 int i;
2597
2598                 if (fiber_autoneg(tp, &flags)) {
2599                         u32 local_adv, remote_adv;
2600
2601                         local_adv = ADVERTISE_PAUSE_CAP;
2602                         remote_adv = 0;
2603                         if (flags & MR_LP_ADV_SYM_PAUSE)
2604                                 remote_adv |= LPA_PAUSE_CAP;
2605                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2606                                 remote_adv |= LPA_PAUSE_ASYM;
2607
2608                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2609
2610                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2611                         current_link_up = 1;
2612                 }
2613                 for (i = 0; i < 30; i++) {
2614                         udelay(20);
2615                         tw32_f(MAC_STATUS,
2616                                (MAC_STATUS_SYNC_CHANGED |
2617                                 MAC_STATUS_CFG_CHANGED));
2618                         udelay(40);
2619                         if ((tr32(MAC_STATUS) &
2620                              (MAC_STATUS_SYNC_CHANGED |
2621                               MAC_STATUS_CFG_CHANGED)) == 0)
2622                                 break;
2623                 }
2624
2625                 mac_status = tr32(MAC_STATUS);
2626                 if (current_link_up == 0 &&
2627                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2628                     !(mac_status & MAC_STATUS_RCVD_CFG))
2629                         current_link_up = 1;
2630         } else {
2631                 /* Forcing 1000FD link up. */
2632                 current_link_up = 1;
2633                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2634
2635                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2636                 udelay(40);
2637         }
2638
2639 out:
2640         return current_link_up;
2641 }
2642
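/* Top-level link setup for TBI (fiber) devices: skip the work when the
 * link is already up and unchanged, run hardware or software autoneg,
 * wait for the MAC status to settle, then update the LEDs and the
 * carrier state.
 */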
2643 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2644 {
2645         u32 orig_pause_cfg;
2646         u16 orig_active_speed;
2647         u8 orig_active_duplex;
2648         u32 mac_status;
2649         int current_link_up;
2650         int i;
2651
2652         orig_pause_cfg =
2653                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2654                                   TG3_FLAG_TX_PAUSE));
2655         orig_active_speed = tp->link_config.active_speed;
2656         orig_active_duplex = tp->link_config.active_duplex;
2657
2658         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2659             netif_carrier_ok(tp->dev) &&
2660             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2661                 mac_status = tr32(MAC_STATUS);
2662                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2663                                MAC_STATUS_SIGNAL_DET |
2664                                MAC_STATUS_CFG_CHANGED |
2665                                MAC_STATUS_RCVD_CFG);
2666                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2667                                    MAC_STATUS_SIGNAL_DET)) {
2668                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2669                                             MAC_STATUS_CFG_CHANGED));
2670                         return 0;
2671                 }
2672         }
2673
2674         tw32_f(MAC_TX_AUTO_NEG, 0);
2675
2676         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2677         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2678         tw32_f(MAC_MODE, tp->mac_mode);
2679         udelay(40);
2680
2681         if (tp->phy_id == PHY_ID_BCM8002)
2682                 tg3_init_bcm8002(tp);
2683
2684         /* Enable link change event even when serdes polling.  */
2685         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2686         udelay(40);
2687
2688         current_link_up = 0;
2689         mac_status = tr32(MAC_STATUS);
2690
2691         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2692                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2693         else
2694                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2695
2696         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2697         tw32_f(MAC_MODE, tp->mac_mode);
2698         udelay(40);
2699
2700         tp->hw_status->status =
2701                 (SD_STATUS_UPDATED |
2702                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2703
2704         for (i = 0; i < 100; i++) {
2705                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2706                                     MAC_STATUS_CFG_CHANGED));
2707                 udelay(5);
2708                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2709                                          MAC_STATUS_CFG_CHANGED |
2710                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2711                         break;
2712         }
2713
2714         mac_status = tr32(MAC_STATUS);
2715         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2716                 current_link_up = 0;
2717                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2718                     tp->serdes_counter == 0) {
2719                         tw32_f(MAC_MODE, (tp->mac_mode |
2720                                           MAC_MODE_SEND_CONFIGS));
2721                         udelay(1);
2722                         tw32_f(MAC_MODE, tp->mac_mode);
2723                 }
2724         }
2725
2726         if (current_link_up == 1) {
2727                 tp->link_config.active_speed = SPEED_1000;
2728                 tp->link_config.active_duplex = DUPLEX_FULL;
2729                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2730                                     LED_CTRL_LNKLED_OVERRIDE |
2731                                     LED_CTRL_1000MBPS_ON));
2732         } else {
2733                 tp->link_config.active_speed = SPEED_INVALID;
2734                 tp->link_config.active_duplex = DUPLEX_INVALID;
2735                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2736                                     LED_CTRL_LNKLED_OVERRIDE |
2737                                     LED_CTRL_TRAFFIC_OVERRIDE));
2738         }
2739
2740         if (current_link_up != netif_carrier_ok(tp->dev)) {
2741                 if (current_link_up)
2742                         netif_carrier_on(tp->dev);
2743                 else
2744                         netif_carrier_off(tp->dev);
2745                 tg3_link_report(tp);
2746         } else {
2747                 u32 now_pause_cfg =
2748                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2749                                          TG3_FLAG_TX_PAUSE);
2750                 if (orig_pause_cfg != now_pause_cfg ||
2751                     orig_active_speed != tp->link_config.active_speed ||
2752                     orig_active_duplex != tp->link_config.active_duplex)
2753                         tg3_link_report(tp);
2754         }
2755
2756         return 0;
2757 }
2758
2759 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2760 {
2761         int current_link_up, err = 0;
2762         u32 bmsr, bmcr;
2763         u16 current_speed;
2764         u8 current_duplex;
2765
2766         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2767         tw32_f(MAC_MODE, tp->mac_mode);
2768         udelay(40);
2769
2770         tw32(MAC_EVENT, 0);
2771
2772         tw32_f(MAC_STATUS,
2773              (MAC_STATUS_SYNC_CHANGED |
2774               MAC_STATUS_CFG_CHANGED |
2775               MAC_STATUS_MI_COMPLETION |
2776               MAC_STATUS_LNKSTATE_CHANGED));
2777         udelay(40);
2778
2779         if (force_reset)
2780                 tg3_phy_reset(tp);
2781
2782         current_link_up = 0;
2783         current_speed = SPEED_INVALID;
2784         current_duplex = DUPLEX_INVALID;
2785
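        /* Note: the BMSR latches link-down events, so it is read twice
         * here; the second read reflects the current link state.
         */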
2786         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2787         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2789                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2790                         bmsr |= BMSR_LSTATUS;
2791                 else
2792                         bmsr &= ~BMSR_LSTATUS;
2793         }
2794
2795         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2796
2797         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2798             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2799                 /* do nothing, just check for link up at the end */
2800         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2801                 u32 adv, new_adv;
2802
2803                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2804                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2805                                   ADVERTISE_1000XPAUSE |
2806                                   ADVERTISE_1000XPSE_ASYM |
2807                                   ADVERTISE_SLCT);
2808
2809                 /* Always advertise symmetric PAUSE just like copper */
2810                 new_adv |= ADVERTISE_1000XPAUSE;
2811
2812                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2813                         new_adv |= ADVERTISE_1000XHALF;
2814                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2815                         new_adv |= ADVERTISE_1000XFULL;
2816
2817                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2818                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2819                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2820                         tg3_writephy(tp, MII_BMCR, bmcr);
2821
2822                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2823                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2824                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2825
2826                         return err;
2827                 }
2828         } else {
2829                 u32 new_bmcr;
2830
2831                 bmcr &= ~BMCR_SPEED1000;
2832                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2833
2834                 if (tp->link_config.duplex == DUPLEX_FULL)
2835                         new_bmcr |= BMCR_FULLDPLX;
2836
2837                 if (new_bmcr != bmcr) {
2838                         /* BMCR_SPEED1000 is a reserved bit that needs
2839                          * to be set on write.
2840                          */
2841                         new_bmcr |= BMCR_SPEED1000;
2842
2843                         /* Force a linkdown */
2844                         if (netif_carrier_ok(tp->dev)) {
2845                                 u32 adv;
2846
2847                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2848                                 adv &= ~(ADVERTISE_1000XFULL |
2849                                          ADVERTISE_1000XHALF |
2850                                          ADVERTISE_SLCT);
2851                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2852                                 tg3_writephy(tp, MII_BMCR, bmcr |
2853                                                            BMCR_ANRESTART |
2854                                                            BMCR_ANENABLE);
2855                                 udelay(10);
2856                                 netif_carrier_off(tp->dev);
2857                         }
2858                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2859                         bmcr = new_bmcr;
2860                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2861                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2862                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2863                             ASIC_REV_5714) {
2864                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2865                                         bmsr |= BMSR_LSTATUS;
2866                                 else
2867                                         bmsr &= ~BMSR_LSTATUS;
2868                         }
2869                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870                 }
2871         }
2872
2873         if (bmsr & BMSR_LSTATUS) {
2874                 current_speed = SPEED_1000;
2875                 current_link_up = 1;
2876                 if (bmcr & BMCR_FULLDPLX)
2877                         current_duplex = DUPLEX_FULL;
2878                 else
2879                         current_duplex = DUPLEX_HALF;
2880
2881                 if (bmcr & BMCR_ANENABLE) {
2882                         u32 local_adv, remote_adv, common;
2883
2884                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2885                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2886                         common = local_adv & remote_adv;
2887                         if (common & (ADVERTISE_1000XHALF |
2888                                       ADVERTISE_1000XFULL)) {
2889                                 if (common & ADVERTISE_1000XFULL)
2890                                         current_duplex = DUPLEX_FULL;
2891                                 else
2892                                         current_duplex = DUPLEX_HALF;
2893
2894                                 tg3_setup_flow_control(tp, local_adv,
2895                                                        remote_adv);
2896                         }
2897                         else
2898                                 current_link_up = 0;
2899                 }
2900         }
2901
2902         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2903         if (tp->link_config.active_duplex == DUPLEX_HALF)
2904                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2905
2906         tw32_f(MAC_MODE, tp->mac_mode);
2907         udelay(40);
2908
2909         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2910
2911         tp->link_config.active_speed = current_speed;
2912         tp->link_config.active_duplex = current_duplex;
2913
2914         if (current_link_up != netif_carrier_ok(tp->dev)) {
2915                 if (current_link_up)
2916                         netif_carrier_on(tp->dev);
2917                 else {
2918                         netif_carrier_off(tp->dev);
2919                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2920                 }
2921                 tg3_link_report(tp);
2922         }
2923         return err;
2924 }
2925
2926 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2927 {
2928         if (tp->serdes_counter) {
2929                 /* Give autoneg time to complete. */
2930                 tp->serdes_counter--;
2931                 return;
2932         }
2933         if (!netif_carrier_ok(tp->dev) &&
2934             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2935                 u32 bmcr;
2936
2937                 tg3_readphy(tp, MII_BMCR, &bmcr);
2938                 if (bmcr & BMCR_ANENABLE) {
2939                         u32 phy1, phy2;
2940
2941                         /* Select shadow register 0x1f */
2942                         tg3_writephy(tp, 0x1c, 0x7c00);
2943                         tg3_readphy(tp, 0x1c, &phy1);
2944
2945                         /* Select expansion interrupt status register */
2946                         tg3_writephy(tp, 0x17, 0x0f01);
2947                         tg3_readphy(tp, 0x15, &phy2);
2948                         tg3_readphy(tp, 0x15, &phy2);
2949
2950                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2951                         /* We have signal detect and are not receiving
2952                          * config code words, so the link is up by parallel
2953                                  * detection.
2954                                  */
2955
2956                                 bmcr &= ~BMCR_ANENABLE;
2957                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2958                                 tg3_writephy(tp, MII_BMCR, bmcr);
2959                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2960                         }
2961                 }
2962         }
2963         else if (netif_carrier_ok(tp->dev) &&
2964                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2965                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2966                 u32 phy2;
2967
2968                 /* Select expansion interrupt status register */
2969                 tg3_writephy(tp, 0x17, 0x0f01);
2970                 tg3_readphy(tp, 0x15, &phy2);
2971                 if (phy2 & 0x20) {
2972                         u32 bmcr;
2973
2974                         /* Config code words received, turn on autoneg. */
2975                         tg3_readphy(tp, MII_BMCR, &bmcr);
2976                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2977
2978                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2979
2980                 }
2981         }
2982 }
2983
2984 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2985 {
2986         int err;
2987
2988         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2989                 err = tg3_setup_fiber_phy(tp, force_reset);
2990         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2991                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2992         } else {
2993                 err = tg3_setup_copper_phy(tp, force_reset);
2994         }
2995
2996         if (tp->link_config.active_speed == SPEED_1000 &&
2997             tp->link_config.active_duplex == DUPLEX_HALF)
2998                 tw32(MAC_TX_LENGTHS,
2999                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3000                       (6 << TX_LENGTHS_IPG_SHIFT) |
3001                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3002         else
3003                 tw32(MAC_TX_LENGTHS,
3004                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3005                       (6 << TX_LENGTHS_IPG_SHIFT) |
3006                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3007
3008         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3009                 if (netif_carrier_ok(tp->dev)) {
3010                         tw32(HOSTCC_STAT_COAL_TICKS,
3011                              tp->coal.stats_block_coalesce_usecs);
3012                 } else {
3013                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3014                 }
3015         }
3016
3017         return err;
3018 }
3019
3020 /* This is called whenever we suspect that the system chipset is re-
3021  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3022  * is bogus tx completions. We try to recover by setting the
3023  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3024  * in the workqueue.
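 * (The reset task applies the workaround: it sets TG3_FLAG_MBOX_WRITE_REORDER
 * and switches the mailbox write routines; see tg3_reset_task() below.)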
3025  */
3026 static void tg3_tx_recover(struct tg3 *tp)
3027 {
3028         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3029                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3030
3031         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3032                "mapped I/O cycles to the network device, attempting to "
3033                "recover. Please report the problem to the driver maintainer "
3034                "and include system chipset information.\n", tp->dev->name);
3035
3036         spin_lock(&tp->lock);
3037         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3038         spin_unlock(&tp->lock);
3039 }
3040
3041 static inline u32 tg3_tx_avail(struct tg3 *tp)
3042 {
3043         smp_mb();
3044         return (tp->tx_pending -
3045                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3046 }
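/* Worked example (assuming a 512-entry TX ring and tx_pending == 511):
 * with tx_prod == 10 and tx_cons == 500, (10 - 500) & 511 == 22 entries
 * are still in flight, leaving 511 - 22 == 489 descriptors available.
 */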
3047
3048 /* Tigon3 never reports partial packet sends.  So we do not
3049  * need special logic to handle SKBs that have not had all
3050  * of their frags sent yet, like SunGEM does.
3051  */
3052 static void tg3_tx(struct tg3 *tp)
3053 {
3054         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3055         u32 sw_idx = tp->tx_cons;
3056
3057         while (sw_idx != hw_idx) {
3058                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3059                 struct sk_buff *skb = ri->skb;
3060                 int i, tx_bug = 0;
3061
3062                 if (unlikely(skb == NULL)) {
3063                         tg3_tx_recover(tp);
3064                         return;
3065                 }
3066
3067                 pci_unmap_single(tp->pdev,
3068                                  pci_unmap_addr(ri, mapping),
3069                                  skb_headlen(skb),
3070                                  PCI_DMA_TODEVICE);
3071
3072                 ri->skb = NULL;
3073
3074                 sw_idx = NEXT_TX(sw_idx);
3075
3076                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3077                         ri = &tp->tx_buffers[sw_idx];
3078                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3079                                 tx_bug = 1;
3080
3081                         pci_unmap_page(tp->pdev,
3082                                        pci_unmap_addr(ri, mapping),
3083                                        skb_shinfo(skb)->frags[i].size,
3084                                        PCI_DMA_TODEVICE);
3085
3086                         sw_idx = NEXT_TX(sw_idx);
3087                 }
3088
3089                 dev_kfree_skb(skb);
3090
3091                 if (unlikely(tx_bug)) {
3092                         tg3_tx_recover(tp);
3093                         return;
3094                 }
3095         }
3096
3097         tp->tx_cons = sw_idx;
3098
3099         /* Need to make the tx_cons update visible to tg3_start_xmit()
3100          * before checking for netif_queue_stopped().  Without the
3101          * memory barrier, there is a small possibility that tg3_start_xmit()
3102          * will miss it and cause the queue to be stopped forever.
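         * (This barrier pairs with the smp_mb() at the top of tg3_tx_avail(),
         * which tg3_start_xmit() calls again after stopping the queue.)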
3103          */
3104         smp_mb();
3105
3106         if (unlikely(netif_queue_stopped(tp->dev) &&
3107                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3108                 netif_tx_lock(tp->dev);
3109                 if (netif_queue_stopped(tp->dev) &&
3110                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3111                         netif_wake_queue(tp->dev);
3112                 netif_tx_unlock(tp->dev);
3113         }
3114 }
3115
3116 /* Returns size of skb allocated or < 0 on error.
3117  *
3118  * We only need to fill in the address because the other members
3119  * of the RX descriptor are invariant; see tg3_init_rings.
3120  *
3121  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3122  * posting buffers we only dirty the first cache line of the RX
3123  * descriptor (containing the address).  Whereas for the RX status
3124  * buffers the cpu only reads the last cacheline of the RX descriptor
3125  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3126  */
3127 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3128                             int src_idx, u32 dest_idx_unmasked)
3129 {
3130         struct tg3_rx_buffer_desc *desc;
3131         struct ring_info *map, *src_map;
3132         struct sk_buff *skb;
3133         dma_addr_t mapping;
3134         int skb_size, dest_idx;
3135
3136         src_map = NULL;
3137         switch (opaque_key) {
3138         case RXD_OPAQUE_RING_STD:
3139                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3140                 desc = &tp->rx_std[dest_idx];
3141                 map = &tp->rx_std_buffers[dest_idx];
3142                 if (src_idx >= 0)
3143                         src_map = &tp->rx_std_buffers[src_idx];
3144                 skb_size = tp->rx_pkt_buf_sz;
3145                 break;
3146
3147         case RXD_OPAQUE_RING_JUMBO:
3148                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3149                 desc = &tp->rx_jumbo[dest_idx];
3150                 map = &tp->rx_jumbo_buffers[dest_idx];
3151                 if (src_idx >= 0)
3152                         src_map = &tp->rx_jumbo_buffers[src_idx];
3153                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3154                 break;
3155
3156         default:
3157                 return -EINVAL;
3158         }
3159
3160         /* Do not overwrite any of the map or rp information
3161          * until we are sure we can commit to a new buffer.
3162          *
3163          * Callers depend upon this behavior and assume that
3164          * we leave everything unchanged if we fail.
3165          */
3166         skb = netdev_alloc_skb(tp->dev, skb_size);
3167         if (skb == NULL)
3168                 return -ENOMEM;
3169
3170         skb_reserve(skb, tp->rx_offset);
3171
3172         mapping = pci_map_single(tp->pdev, skb->data,
3173                                  skb_size - tp->rx_offset,
3174                                  PCI_DMA_FROMDEVICE);
3175
3176         map->skb = skb;
3177         pci_unmap_addr_set(map, mapping, mapping);
3178
3179         if (src_map != NULL)
3180                 src_map->skb = NULL;
3181
3182         desc->addr_hi = ((u64)mapping >> 32);
3183         desc->addr_lo = ((u64)mapping & 0xffffffff);
3184
3185         return skb_size;
3186 }
3187
3188 /* We only need to move over the address because the other
3189  * members of the RX descriptor are invariant.  See notes above
3190  * tg3_alloc_rx_skb for full details.
3191  */
3192 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3193                            int src_idx, u32 dest_idx_unmasked)
3194 {
3195         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3196         struct ring_info *src_map, *dest_map;
3197         int dest_idx;
3198
3199         switch (opaque_key) {
3200         case RXD_OPAQUE_RING_STD:
3201                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3202                 dest_desc = &tp->rx_std[dest_idx];
3203                 dest_map = &tp->rx_std_buffers[dest_idx];
3204                 src_desc = &tp->rx_std[src_idx];
3205                 src_map = &tp->rx_std_buffers[src_idx];
3206                 break;
3207
3208         case RXD_OPAQUE_RING_JUMBO:
3209                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3210                 dest_desc = &tp->rx_jumbo[dest_idx];
3211                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3212                 src_desc = &tp->rx_jumbo[src_idx];
3213                 src_map = &tp->rx_jumbo_buffers[src_idx];
3214                 break;
3215
3216         default:
3217                 return;
3218         }
3219
3220         dest_map->skb = src_map->skb;
3221         pci_unmap_addr_set(dest_map, mapping,
3222                            pci_unmap_addr(src_map, mapping));
3223         dest_desc->addr_hi = src_desc->addr_hi;
3224         dest_desc->addr_lo = src_desc->addr_lo;
3225
3226         src_map->skb = NULL;
3227 }
3228
3229 #if TG3_VLAN_TAG_USED
3230 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3231 {
3232         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3233 }
3234 #endif
3235
3236 /* The RX ring scheme is composed of multiple rings which post fresh
3237  * buffers to the chip, and one special ring the chip uses to report
3238  * status back to the host.
3239  *
3240  * The special ring reports the status of received packets to the
3241  * host.  The chip does not write into the original descriptor the
3242  * RX buffer was obtained from.  The chip simply takes the original
3243  * descriptor as provided by the host, updates the status and length
3244  * field, then writes this into the next status ring entry.
3245  *
3246  * Each ring the host uses to post buffers to the chip is described
3247  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3248  * it is first placed into the on-chip ram.  When the packet's length
3249  * is known, it walks down the TG3_BDINFO entries to select the ring.
3250  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3251  * which is within the range of the new packet's length is chosen.
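 * For example (illustrative sizes only): if the standard ring's MAXLEN
 * covers normal MTU-sized buffers and the jumbo ring's MAXLEN covers
 * 9000-byte buffers, a 1400-byte frame lands in a standard-ring buffer
 * while a 4000-byte frame is steered to the jumbo ring.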
3252  *
3253  * The "separate ring for rx status" scheme may sound queer, but it makes
3254  * sense from a cache coherency perspective.  If only the host writes
3255  * to the buffer post rings, and only the chip writes to the rx status
3256  * rings, then cache lines never move beyond shared-modified state.
3257  * If both the host and chip were to write into the same ring, cache line
3258  * eviction could occur since both entities want it in an exclusive state.
3259  */
3260 static int tg3_rx(struct tg3 *tp, int budget)
3261 {
3262         u32 work_mask, rx_std_posted = 0;
3263         u32 sw_idx = tp->rx_rcb_ptr;
3264         u16 hw_idx;
3265         int received;
3266
3267         hw_idx = tp->hw_status->idx[0].rx_producer;
3268         /*
3269          * We need to order the read of hw_idx and the read of
3270          * the opaque cookie.
3271          */
3272         rmb();
3273         work_mask = 0;
3274         received = 0;
3275         while (sw_idx != hw_idx && budget > 0) {
3276                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3277                 unsigned int len;
3278                 struct sk_buff *skb;
3279                 dma_addr_t dma_addr;
3280                 u32 opaque_key, desc_idx, *post_ptr;
3281
3282                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3283                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3284                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3285                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3286                                                   mapping);
3287                         skb = tp->rx_std_buffers[desc_idx].skb;
3288                         post_ptr = &tp->rx_std_ptr;
3289                         rx_std_posted++;
3290                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3291                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3292                                                   mapping);
3293                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3294                         post_ptr = &tp->rx_jumbo_ptr;
3295                 }
3296                 else {
3297                         goto next_pkt_nopost;
3298                 }
3299
3300                 work_mask |= opaque_key;
3301
3302                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3303                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3304                 drop_it:
3305                         tg3_recycle_rx(tp, opaque_key,
3306                                        desc_idx, *post_ptr);
3307                 drop_it_no_recycle:
3308                         /* Other statistics kept track of by card. */
3309                         tp->net_stats.rx_dropped++;
3310                         goto next_pkt;
3311                 }
3312
3313                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3314
3315                 if (len > RX_COPY_THRESHOLD
3316                         && tp->rx_offset == 2
3317                         /* rx_offset != 2 iff this is a 5701 card running
3318                          * in PCI-X mode [see tg3_get_invariants()] */
3319                 ) {
3320                         int skb_size;
3321
3322                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3323                                                     desc_idx, *post_ptr);
3324                         if (skb_size < 0)
3325                                 goto drop_it;
3326
3327                         pci_unmap_single(tp->pdev, dma_addr,
3328                                          skb_size - tp->rx_offset,
3329                                          PCI_DMA_FROMDEVICE);
3330
3331                         skb_put(skb, len);
3332                 } else {
3333                         struct sk_buff *copy_skb;
3334
3335                         tg3_recycle_rx(tp, opaque_key,
3336                                        desc_idx, *post_ptr);
3337
3338                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3339                         if (copy_skb == NULL)
3340                                 goto drop_it_no_recycle;
3341
3342                         skb_reserve(copy_skb, 2);
3343                         skb_put(copy_skb, len);
3344                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3345                         memcpy(copy_skb->data, skb->data, len);
3346                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3347
3348                         /* We'll reuse the original ring buffer. */
3349                         skb = copy_skb;
3350                 }
3351
3352                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3353                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3354                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3355                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3356                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3357                 else
3358                         skb->ip_summed = CHECKSUM_NONE;
3359
3360                 skb->protocol = eth_type_trans(skb, tp->dev);
3361 #if TG3_VLAN_TAG_USED
3362                 if (tp->vlgrp != NULL &&
3363                     desc->type_flags & RXD_FLAG_VLAN) {
3364                         tg3_vlan_rx(tp, skb,
3365                                     desc->err_vlan & RXD_VLAN_MASK);
3366                 } else
3367 #endif
3368                         netif_receive_skb(skb);
3369
3370                 tp->dev->last_rx = jiffies;
3371                 received++;
3372                 budget--;
3373
3374 next_pkt:
3375                 (*post_ptr)++;
3376
3377                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3378                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3379
3380                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3381                                      TG3_64BIT_REG_LOW, idx);
3382                         work_mask &= ~RXD_OPAQUE_RING_STD;
3383                         rx_std_posted = 0;
3384                 }
3385 next_pkt_nopost:
3386                 sw_idx++;
3387                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3388
3389                 /* Refresh hw_idx to see if there is new work */
3390                 if (sw_idx == hw_idx) {
3391                         hw_idx = tp->hw_status->idx[0].rx_producer;
3392                         rmb();
3393                 }
3394         }
3395
3396         /* ACK the status ring. */
3397         tp->rx_rcb_ptr = sw_idx;
3398         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3399
3400         /* Refill RX ring(s). */
3401         if (work_mask & RXD_OPAQUE_RING_STD) {
3402                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3403                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3404                              sw_idx);
3405         }
3406         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3407                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3408                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3409                              sw_idx);
3410         }
3411         mmiowb();
3412
3413         return received;
3414 }
3415
3416 static int tg3_poll(struct net_device *netdev, int *budget)
3417 {
3418         struct tg3 *tp = netdev_priv(netdev);
3419         struct tg3_hw_status *sblk = tp->hw_status;
3420         int done;
3421
3422         /* handle link change and other phy events */
3423         if (!(tp->tg3_flags &
3424               (TG3_FLAG_USE_LINKCHG_REG |
3425                TG3_FLAG_POLL_SERDES))) {
3426                 if (sblk->status & SD_STATUS_LINK_CHG) {
3427                         sblk->status = SD_STATUS_UPDATED |
3428                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3429                         spin_lock(&tp->lock);
3430                         tg3_setup_phy(tp, 0);
3431                         spin_unlock(&tp->lock);
3432                 }
3433         }
3434
3435         /* run TX completion thread */
3436         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3437                 tg3_tx(tp);
3438                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3439                         netif_rx_complete(netdev);
3440                         schedule_work(&tp->reset_task);
3441                         return 0;
3442                 }
3443         }
3444
3445         /* run RX thread, within the bounds set by NAPI.
3446          * All RX "locking" is done by ensuring outside
3447          * code synchronizes with dev->poll()
3448          */
3449         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3450                 int orig_budget = *budget;
3451                 int work_done;
3452
3453                 if (orig_budget > netdev->quota)
3454                         orig_budget = netdev->quota;
3455
3456                 work_done = tg3_rx(tp, orig_budget);
3457
3458                 *budget -= work_done;
3459                 netdev->quota -= work_done;
3460         }
3461
3462         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3463                 tp->last_tag = sblk->status_tag;
3464                 rmb();
3465         } else
3466                 sblk->status &= ~SD_STATUS_UPDATED;
3467
3468         /* if no more work, tell net stack and NIC we're done */
3469         done = !tg3_has_work(tp);
3470         if (done) {
3471                 netif_rx_complete(netdev);
3472                 tg3_restart_ints(tp);
3473         }
3474
3475         return (done ? 0 : 1);
3476 }
3477
3478 static void tg3_irq_quiesce(struct tg3 *tp)
3479 {
3480         BUG_ON(tp->irq_sync);
3481
3482         tp->irq_sync = 1;
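        /* Make the irq_sync store visible to the interrupt handlers (which
         * check it via tg3_irq_sync()) before synchronizing with them below.
         */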
3483         smp_mb();
3484
3485         synchronize_irq(tp->pdev->irq);
3486 }
3487
3488 static inline int tg3_irq_sync(struct tg3 *tp)
3489 {
3490         return tp->irq_sync;
3491 }
3492
3493 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3494  * If irq_sync is non-zero, then we must synchronize with the IRQ
3495  * handler as well.  Most of the time, this is not necessary except when
3496  * shutting down the device.
3497  */
3498 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3499 {
3500         if (irq_sync)
3501                 tg3_irq_quiesce(tp);
3502         spin_lock_bh(&tp->lock);
3503 }
3504
3505 static inline void tg3_full_unlock(struct tg3 *tp)
3506 {
3507         spin_unlock_bh(&tp->lock);
3508 }
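/* Typical usage, as in tg3_reset_task() below: the lock is taken with
 * irq_sync == 0 for quick state updates and with irq_sync == 1 before
 * the hardware is halted and reconfigured:
 *
 *      tg3_full_lock(tp, 1);
 *      ... halt / re-init hardware ...
 *      tg3_full_unlock(tp);
 */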
3509
3510 /* One-shot MSI handler - Chip automatically disables interrupt
3511  * after sending MSI so driver doesn't have to do it.
3512  */
3513 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3514 {
3515         struct net_device *dev = dev_id;
3516         struct tg3 *tp = netdev_priv(dev);
3517
3518         prefetch(tp->hw_status);
3519         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3520
3521         if (likely(!tg3_irq_sync(tp)))
3522                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3523
3524         return IRQ_HANDLED;
3525 }
3526
3527 /* MSI ISR - No need to check for interrupt sharing and no need to
3528  * flush status block and interrupt mailbox. PCI ordering rules
3529  * guarantee that MSI will arrive after the status block.
3530  */
3531 static irqreturn_t tg3_msi(int irq, void *dev_id)
3532 {
3533         struct net_device *dev = dev_id;
3534         struct tg3 *tp = netdev_priv(dev);
3535
3536         prefetch(tp->hw_status);
3537         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3538         /*
3539          * Writing any value to intr-mbox-0 clears PCI INTA# and
3540          * chip-internal interrupt pending events.
3541          * Writing non-zero to intr-mbox-0 additionally tells the
3542          * NIC to stop sending us irqs, engaging "in-intr-handler"
3543          * event coalescing.
3544          */
3545         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3546         if (likely(!tg3_irq_sync(tp)))
3547                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3548
3549         return IRQ_RETVAL(1);
3550 }
3551
3552 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3553 {
3554         struct net_device *dev = dev_id;
3555         struct tg3 *tp = netdev_priv(dev);
3556         struct tg3_hw_status *sblk = tp->hw_status;
3557         unsigned int handled = 1;
3558
3559         /* In INTx mode, it is possible for the interrupt to arrive at
3560          * the CPU before the status block posted prior to the interrupt.
3561          * Reading the PCI State register will confirm whether the
3562          * interrupt is ours and will flush the status block.
3563          */
3564         if ((sblk->status & SD_STATUS_UPDATED) ||
3565             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3566                 /*
3567                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3568                  * chip-internal interrupt pending events.
3569                  * Writing non-zero to intr-mbox-0 additionally tells the
3570                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3571                  * event coalescing.
3572                  */
3573                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3574                              0x00000001);
3575                 if (tg3_irq_sync(tp))
3576                         goto out;
3577                 sblk->status &= ~SD_STATUS_UPDATED;
3578                 if (likely(tg3_has_work(tp))) {
3579                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3580                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3581                 } else {
3582                         /* No work, shared interrupt perhaps?  re-enable
3583                          * interrupts, and flush that PCI write
3584                          */
3585                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3586                                 0x00000000);
3587                 }
3588         } else {        /* shared interrupt */
3589                 handled = 0;
3590         }
3591 out:
3592         return IRQ_RETVAL(handled);
3593 }
3594
3595 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3596 {
3597         struct net_device *dev = dev_id;
3598         struct tg3 *tp = netdev_priv(dev);
3599         struct tg3_hw_status *sblk = tp->hw_status;
3600         unsigned int handled = 1;
3601
3602         /* In INTx mode, it is possible for the interrupt to arrive at
3603          * the CPU before the status block posted prior to the interrupt.
3604          * Reading the PCI State register will confirm whether the
3605          * interrupt is ours and will flush the status block.
3606          */
3607         if ((sblk->status_tag != tp->last_tag) ||
3608             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3609                 /*
3610                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3611                  * chip-internal interrupt pending events.
3612                  * Writing non-zero to intr-mbox-0 additionally tells the
3613                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3614                  * event coalescing.
3615                  */
3616                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3617                              0x00000001);
3618                 if (tg3_irq_sync(tp))
3619                         goto out;
3620                 if (netif_rx_schedule_prep(dev)) {
3621                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3622                         /* Update last_tag to mark that this status has been
3623                          * seen. Because interrupt may be shared, we may be
3624                          * racing with tg3_poll(), so only update last_tag
3625                          * if tg3_poll() is not scheduled.
3626                          */
3627                         tp->last_tag = sblk->status_tag;
3628                         __netif_rx_schedule(dev);
3629                 }
3630         } else {        /* shared interrupt */
3631                 handled = 0;
3632         }
3633 out:
3634         return IRQ_RETVAL(handled);
3635 }
3636
3637 /* ISR for interrupt test */
3638 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3639 {
3640         struct net_device *dev = dev_id;
3641         struct tg3 *tp = netdev_priv(dev);
3642         struct tg3_hw_status *sblk = tp->hw_status;
3643
3644         if ((sblk->status & SD_STATUS_UPDATED) ||
3645             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3646                 tg3_disable_ints(tp);
3647                 return IRQ_RETVAL(1);
3648         }
3649         return IRQ_RETVAL(0);
3650 }
3651
3652 static int tg3_init_hw(struct tg3 *, int);
3653 static int tg3_halt(struct tg3 *, int, int);
3654
3655 /* Restart hardware after configuration changes, self-test, etc.
3656  * Invoked with tp->lock held.
3657  */
3658 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3659 {
3660         int err;
3661
3662         err = tg3_init_hw(tp, reset_phy);
3663         if (err) {
3664                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3665                        "aborting.\n", tp->dev->name);
3666                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3667                 tg3_full_unlock(tp);
3668                 del_timer_sync(&tp->timer);
3669                 tp->irq_sync = 0;
3670                 netif_poll_enable(tp->dev);
3671                 dev_close(tp->dev);
3672                 tg3_full_lock(tp, 0);
3673         }
3674         return err;
3675 }
3676
3677 #ifdef CONFIG_NET_POLL_CONTROLLER
3678 static void tg3_poll_controller(struct net_device *dev)
3679 {
3680         struct tg3 *tp = netdev_priv(dev);
3681
3682         tg3_interrupt(tp->pdev->irq, dev);
3683 }
3684 #endif
3685
3686 static void tg3_reset_task(struct work_struct *work)
3687 {
3688         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3689         unsigned int restart_timer;
3690
3691         tg3_full_lock(tp, 0);
3692         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3693
3694         if (!netif_running(tp->dev)) {
3695                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3696                 tg3_full_unlock(tp);
3697                 return;
3698         }
3699
3700         tg3_full_unlock(tp);
3701
3702         tg3_netif_stop(tp);
3703
3704         tg3_full_lock(tp, 1);
3705
3706         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3707         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3708
3709         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3710                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3711                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3712                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3713                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3714         }
3715
3716         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3717         if (tg3_init_hw(tp, 1))
3718                 goto out;
3719
3720         tg3_netif_start(tp);
3721
3722         if (restart_timer)
3723                 mod_timer(&tp->timer, jiffies + 1);
3724
3725 out:
3726         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3727
3728         tg3_full_unlock(tp);
3729 }
3730
3731 static void tg3_tx_timeout(struct net_device *dev)
3732 {
3733         struct tg3 *tp = netdev_priv(dev);
3734
3735         if (netif_msg_tx_err(tp))
3736                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3737                        dev->name);
3738
3739         schedule_work(&tp->reset_task);
3740 }
3741
3742 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
3743 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3744 {
3745         u32 base = (u32) mapping & 0xffffffff;
3746
3747         return ((base > 0xffffdcc0) &&
3748                 (base + len + 8 < base));
3749 }
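/* Worked example: a mapping at base == 0xffffff00 with len == 0x200 gives
 * base + len + 8 == 0x100000108, which truncates to 0x108 in 32 bits and
 * is therefore smaller than base, so the buffer crosses a 4GB boundary
 * and the test returns true.
 */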
3750
3751 /* Test for DMA addresses > 40-bit */
3752 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3753                                           int len)
3754 {
3755 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3756         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3757                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3758         return 0;
3759 #else
3760         return 0;
3761 #endif
3762 }
3763
3764 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3765
3766 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3767 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3768                                        u32 last_plus_one, u32 *start,
3769                                        u32 base_flags, u32 mss)
3770 {
3771         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3772         dma_addr_t new_addr = 0;
3773         u32 entry = *start;
3774         int i, ret = 0;
3775
3776         if (!new_skb) {
3777                 ret = -1;
3778         } else {
3779                 /* New SKB is guaranteed to be linear. */
3780                 entry = *start;
3781                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3782                                           PCI_DMA_TODEVICE);
3783                 /* Make sure new skb does not cross any 4G boundaries.
3784                  * Drop the packet if it does.
3785                  */
3786                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3787                         ret = -1;
3788                         dev_kfree_skb(new_skb);
3789                         new_skb = NULL;
3790                 } else {
3791                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3792                                     base_flags, 1 | (mss << 1));
3793                         *start = NEXT_TX(entry);
3794                 }
3795         }
3796
3797         /* Now clean up the sw ring entries. */
3798         i = 0;
3799         while (entry != last_plus_one) {
3800                 int len;
3801
3802                 if (i == 0)
3803                         len = skb_headlen(skb);
3804                 else
3805                         len = skb_shinfo(skb)->frags[i-1].size;
3806                 pci_unmap_single(tp->pdev,
3807                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3808                                  len, PCI_DMA_TODEVICE);
3809                 if (i == 0) {
3810                         tp->tx_buffers[entry].skb = new_skb;
3811                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3812                 } else {
3813                         tp->tx_buffers[entry].skb = NULL;
3814                 }
3815                 entry = NEXT_TX(entry);
3816                 i++;
3817         }
3818
3819         dev_kfree_skb(skb);
3820
3821         return ret;
3822 }
3823
3824 static void tg3_set_txd(struct tg3 *tp, int entry,
3825                         dma_addr_t mapping, int len, u32 flags,
3826                         u32 mss_and_is_end)
3827 {
3828         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3829         int is_end = (mss_and_is_end & 0x1);
3830         u32 mss = (mss_and_is_end >> 1);
3831         u32 vlan_tag = 0;
3832
3833         if (is_end)
3834                 flags |= TXD_FLAG_END;
3835         if (flags & TXD_FLAG_VLAN) {
3836                 vlan_tag = flags >> 16;
3837                 flags &= 0xffff;
3838         }
3839         vlan_tag |= (mss << TXD_MSS_SHIFT);
3840
3841         txd->addr_hi = ((u64) mapping >> 32);
3842         txd->addr_lo = ((u64) mapping & 0xffffffff);
3843         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3844         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3845 }
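/* Example of the mss_and_is_end encoding: for the final fragment of a
 * TSO packet with an MSS of 1460, callers pass 1 | (1460 << 1); bit 0
 * marks the last descriptor and the remaining bits carry the MSS.
 */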
3846
3847 /* hard_start_xmit for devices that don't have any bugs and
3848  * support TG3_FLG2_HW_TSO_2 only.
3849  */
3850 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3851 {
3852         struct tg3 *tp = netdev_priv(dev);
3853         dma_addr_t mapping;
3854         u32 len, entry, base_flags, mss;
3855
3856         len = skb_headlen(skb);
3857
3858         /* We are running in BH disabled context with netif_tx_lock
3859          * and TX reclaim runs via tp->poll inside of a software
3860          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3861          * no IRQ context deadlocks to worry about either.  Rejoice!
3862          */
3863         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3864                 if (!netif_queue_stopped(dev)) {
3865                         netif_stop_queue(dev);
3866
3867                         /* This is a hard error, log it. */
3868                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3869                                "queue awake!\n", dev->name);
3870                 }
3871                 return NETDEV_TX_BUSY;
3872         }
3873
3874         entry = tp->tx_prod;
3875         base_flags = 0;
3876 #if TG3_TSO_SUPPORT != 0
3877         mss = 0;
3878         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3879             (mss = skb_shinfo(skb)->gso_size) != 0) {
3880                 int tcp_opt_len, ip_tcp_len;
3881
3882                 if (skb_header_cloned(skb) &&
3883                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3884                         dev_kfree_skb(skb);
3885                         goto out_unlock;
3886                 }
3887
3888                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3889                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3890                 else {
3891                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3892                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3893                                      sizeof(struct tcphdr);
3894
3895                         skb->nh.iph->check = 0;
3896                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3897                                                      tcp_opt_len);
3898                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3899                 }
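                /* In both branches above the L3 + L4 header length is folded
                 * into the upper bits of the mss word, presumably so the chip
                 * knows how much header precedes the payload when segmenting.
                 */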
3900
3901                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3902                                TXD_FLAG_CPU_POST_DMA);
3903
3904                 skb->h.th->check = 0;
3905
3906         }
3907         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3908                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3909 #else
3910         mss = 0;
3911         if (skb->ip_summed == CHECKSUM_PARTIAL)
3912                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3913 #endif
3914 #if TG3_VLAN_TAG_USED
3915         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3916                 base_flags |= (TXD_FLAG_VLAN |
3917                                (vlan_tx_tag_get(skb) << 16));
3918 #endif
3919
3920         /* Queue skb data, a.k.a. the main skb fragment. */
3921         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3922
3923         tp->tx_buffers[entry].skb = skb;
3924         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3925
3926         tg3_set_txd(tp, entry, mapping, len, base_flags,
3927                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3928
3929         entry = NEXT_TX(entry);
3930
3931         /* Now loop through additional data fragments, and queue them. */
3932         if (skb_shinfo(skb)->nr_frags > 0) {
3933                 unsigned int i, last;
3934
3935                 last = skb_shinfo(skb)->nr_frags - 1;
3936                 for (i = 0; i <= last; i++) {
3937                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3938
3939                         len = frag->size;
3940                         mapping = pci_map_page(tp->pdev,
3941                                                frag->page,
3942                                                frag->page_offset,
3943                                                len, PCI_DMA_TODEVICE);
3944
3945                         tp->tx_buffers[entry].skb = NULL;
3946                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3947
3948                         tg3_set_txd(tp, entry, mapping, len,
3949                                     base_flags, (i == last) | (mss << 1));
3950
3951                         entry = NEXT_TX(entry);
3952                 }
3953         }
3954
3955         /* Packets are ready, update Tx producer idx local and on card. */
3956         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3957
3958         tp->tx_prod = entry;
3959         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3960                 netif_stop_queue(dev);
3961                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
3962                         netif_wake_queue(tp->dev);
3963         }
3964
3965 out_unlock:
3966         mmiowb();
3967
3968         dev->trans_start = jiffies;
3969
3970         return NETDEV_TX_OK;
3971 }
3972
3973 #if TG3_TSO_SUPPORT != 0
3974 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3975
3976 /* Use GSO to work around a rare TSO bug that may be triggered when the
3977  * TSO header is greater than 80 bytes.
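 * (The 80 bytes are ETH_HLEN plus the IP and TCP headers including
 * options, as computed in tg3_start_xmit_dma_bug() below.)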
3978  */
3979 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3980 {
3981         struct sk_buff *segs, *nskb;
3982
3983         /* Estimate the number of fragments in the worst case */
3984         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3985                 netif_stop_queue(tp->dev);
3986                 return NETDEV_TX_BUSY;
3987         }
3988
3989         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3990         if (unlikely(IS_ERR(segs)))
3991                 goto tg3_tso_bug_end;
3992
3993         do {
3994                 nskb = segs;
3995                 segs = segs->next;
3996                 nskb->next = NULL;
3997                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3998         } while (segs);
3999
4000 tg3_tso_bug_end:
4001         dev_kfree_skb(skb);
4002
4003         return NETDEV_TX_OK;
4004 }
4005 #endif
4006
4007 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4008  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4009  */
4010 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4011 {
4012         struct tg3 *tp = netdev_priv(dev);
4013         dma_addr_t mapping;
4014         u32 len, entry, base_flags, mss;
4015         int would_hit_hwbug;
4016
4017         len = skb_headlen(skb);
4018
4019         /* We are running in BH disabled context with netif_tx_lock
4020          * and TX reclaim runs via tp->poll inside of a software
4021          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4022          * no IRQ context deadlocks to worry about either.  Rejoice!
4023          */
4024         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4025                 if (!netif_queue_stopped(dev)) {
4026                         netif_stop_queue(dev);
4027
4028                         /* This is a hard error, log it. */
4029                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4030                                "queue awake!\n", dev->name);
4031                 }
4032                 return NETDEV_TX_BUSY;
4033         }
4034
4035         entry = tp->tx_prod;
4036         base_flags = 0;
4037         if (skb->ip_summed == CHECKSUM_PARTIAL)
4038                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4039 #if TG3_TSO_SUPPORT != 0
4040         mss = 0;
4041         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4042             (mss = skb_shinfo(skb)->gso_size) != 0) {
4043                 int tcp_opt_len, ip_tcp_len, hdr_len;
4044
4045                 if (skb_header_cloned(skb) &&
4046                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4047                         dev_kfree_skb(skb);
4048                         goto out_unlock;
4049                 }
4050
4051                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4052                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
4053
4054                 hdr_len = ip_tcp_len + tcp_opt_len;
4055                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4056                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
4057                         return (tg3_tso_bug(tp, skb));
4058
4059                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4060                                TXD_FLAG_CPU_POST_DMA);
4061
4062                 skb->nh.iph->check = 0;
4063                 skb->nh.iph->tot_len = htons(mss + hdr_len);
4064                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4065                         skb->h.th->check = 0;
4066                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4067                 }
4068                 else {
4069                         skb->h.th->check =
4070                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4071                                                    skb->nh.iph->daddr,
4072                                                    0, IPPROTO_TCP, 0);
4073                 }
4074
4075                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4076                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4077                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4078                                 int tsflags;
4079
4080                                 tsflags = ((skb->nh.iph->ihl - 5) +
4081                                            (tcp_opt_len >> 2));
4082                                 mss |= (tsflags << 11);
4083                         }
4084                 } else {
4085                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4086                                 int tsflags;
4087
4088                                 tsflags = ((skb->nh.iph->ihl - 5) +
4089                                            (tcp_opt_len >> 2));
4090                                 base_flags |= tsflags << 12;
4091                         }
4092                 }
4093         }
4094 #else
4095         mss = 0;
4096 #endif
4097 #if TG3_VLAN_TAG_USED
4098         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4099                 base_flags |= (TXD_FLAG_VLAN |
4100                                (vlan_tx_tag_get(skb) << 16));
4101 #endif
4102
4103         /* Queue skb data, a.k.a. the main skb fragment. */
4104         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4105
4106         tp->tx_buffers[entry].skb = skb;
4107         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4108
4109         would_hit_hwbug = 0;
4110
4111         if (tg3_4g_overflow_test(mapping, len))
4112                 would_hit_hwbug = 1;
4113
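             /* The last argument packs the end-of-packet flag into bit 0
              * (set when the skb has no further fragments) with the MSS
              * in the bits above it.
              */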
4114         tg3_set_txd(tp, entry, mapping, len, base_flags,
4115                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4116
4117         entry = NEXT_TX(entry);
4118
4119         /* Now loop through additional data fragments, and queue them. */
4120         if (skb_shinfo(skb)->nr_frags > 0) {
4121                 unsigned int i, last;
4122
4123                 last = skb_shinfo(skb)->nr_frags - 1;
4124                 for (i = 0; i <= last; i++) {
4125                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4126
4127                         len = frag->size;
4128                         mapping = pci_map_page(tp->pdev,
4129                                                frag->page,
4130                                                frag->page_offset,
4131                                                len, PCI_DMA_TODEVICE);
4132
4133                         tp->tx_buffers[entry].skb = NULL;
4134                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4135
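                             /* Mappings that cross a 4GB boundary, or that
                              * fall outside the 40-bit DMA range on affected
                              * chips, trip a hardware bug; flag the packet so
                              * the workaround below can remap it.
                              */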
4136                         if (tg3_4g_overflow_test(mapping, len))
4137                                 would_hit_hwbug = 1;
4138
4139                         if (tg3_40bit_overflow_test(tp, mapping, len))
4140                                 would_hit_hwbug = 1;
4141
4142                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4143                                 tg3_set_txd(tp, entry, mapping, len,
4144                                             base_flags, (i == last)|(mss << 1));
4145                         else
4146                                 tg3_set_txd(tp, entry, mapping, len,
4147                                             base_flags, (i == last));
4148
4149                         entry = NEXT_TX(entry);
4150                 }
4151         }
4152
4153         if (would_hit_hwbug) {
4154                 u32 last_plus_one = entry;
4155                 u32 start;
4156
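                     /* Rewind to the first descriptor this skb consumed so
                      * the workaround can redo the mappings for the whole
                      * packet.
                      */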
4157                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4158                 start &= (TG3_TX_RING_SIZE - 1);
4159
4160                 /* If the workaround fails due to memory/mapping
4161                  * failure, silently drop this packet.
4162                  */
4163                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4164                                                 &start, base_flags, mss))
4165                         goto out_unlock;
4166
4167                 entry = start;
4168         }
4169
4170         /* Packets are ready; update the Tx producer index locally and on the card. */
4171         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4172
4173         tp->tx_prod = entry;
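             /* Stop the queue once it can no longer hold a maximally
              * fragmented skb, then re-check: the TX completion path may
              * have freed entries in the meantime, in which case the queue
              * is woken again so it does not stall.
              */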
4174         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4175                 netif_stop_queue(dev);
4176                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4177                         netif_wake_queue(tp->dev);
4178         }
4179
4180 out_unlock:
4181         mmiowb();
4182
4183         dev->trans_start = jiffies;
4184
4185         return NETDEV_TX_OK;
4186 }
4187
4188 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4189                                int new_mtu)
4190 {
4191         dev->mtu = new_mtu;
4192
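             /* TSO is turned off for 5780-class chips whenever a jumbo MTU
              * is requested (these chips cannot do both at once) and turned
              * back on when the MTU returns to the standard size.
              */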
4193         if (new_mtu > ETH_DATA_LEN) {
4194                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4195                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4196                         ethtool_op_set_tso(dev, 0);
4197                 }
4198                 else
4199                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4200         } else {
4201                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4202                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4203                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4204         }
4205 }
4206
4207 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4208 {
4209         struct tg3 *tp = netdev_priv(dev);
4210         int err;
4211
4212         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4213                 return -EINVAL;
4214
4215         if (!netif_running(dev)) {
4216                 /* We'll just catch it later when the
4217                  * device is brought up.
4218                  */
4219                 tg3_set_mtu(dev, tp, new_mtu);
4220                 return 0;
4221         }
4222
4223         tg3_netif_stop(tp);
4224
4225         tg3_full_lock(tp, 1);
4226
4227         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4228
4229         tg3_set_mtu(dev, tp, new_mtu);
4230
4231         err = tg3_restart_hw(tp, 0);
4232
4233         if (!err)
4234                 tg3_netif_start(tp);
4235
4236         tg3_full_unlock(tp);
4237
4238         return err;
4239 }
4240
4241 /* Free up pending packets in all rx/tx rings.
4242  *
4243  * The chip has been shut down and the driver detached from
4244  * the networking stack, so no interrupts or new tx packets will
4245  * end up in the driver.  tp->{tx,}lock is not held and we are not
4246  * in an interrupt context and thus may sleep.
4247  */
4248 static void tg3_free_rings(struct tg3 *tp)
4249 {
4250         struct ring_info *rxp;
4251         int i;
4252
4253         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4254                 rxp = &tp->rx_std_buffers[i];
4255
4256                 if (rxp->skb == NULL)
4257                         continue;
4258                 pci_unmap_single(tp->pdev,
4259                                  pci_unmap_addr(rxp, mapping),
4260                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4261                                  PCI_DMA_FROMDEVICE);
4262                 dev_kfree_skb_any(rxp->skb);
4263                 rxp->skb = NULL;
4264         }
4265
4266         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4267                 rxp = &tp->rx_jumbo_buffers[i];
4268
4269                 if (rxp->skb == NULL)
4270                         continue;
4271                 pci_unmap_single(tp->pdev,
4272                                  pci_unmap_addr(rxp, mapping),
4273                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4274                                  PCI_DMA_FROMDEVICE);
4275                 dev_kfree_skb_any(rxp->skb);
4276                 rxp->skb = NULL;
4277         }
4278
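             /* Walk the TX ring: unmap the linear part of each pending skb,
              * then each of its page fragments, and free the skb itself.
              */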
4279         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4280                 struct tx_ring_info *txp;
4281                 struct sk_buff *skb;
4282                 int j;
4283
4284                 txp = &tp->tx_buffers[i];
4285                 skb = txp->skb;
4286
4287                 if (skb == NULL) {
4288                         i++;
4289                         continue;
4290                 }
4291
4292                 pci_unmap_single(tp->pdev,
4293                                  pci_unmap_addr(txp, mapping),
4294                                  skb_headlen(skb),
4295                                  PCI_DMA_TODEVICE);
4296                 txp->skb = NULL;
4297
4298                 i++;
4299
4300                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4301                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4302                         pci_unmap_page(tp->pdev,
4303                                        pci_unmap_addr(txp, mapping),
4304                                        skb_shinfo(skb)->frags[j].size,
4305                                        PCI_DMA_TODEVICE);
4306                         i++;
4307                 }
4308
4309                 dev_kfree_skb_any(skb);
4310         }
4311 }
4312
4313 /* Initialize tx/rx rings for packet processing.
4314  *
4315  * The chip has been shut down and the driver detached from
4316  * the networking stack, so no interrupts or new tx packets will
4317  * end up in the driver.  tp->{tx,}lock are held and thus
4318  * we may not sleep.
4319  */
4320 static int tg3_init_rings(struct tg3 *tp)
4321 {
4322         u32 i;
4323
4324         /* Free up all the SKBs. */
4325         tg3_free_rings(tp);
4326
4327         /* Zero out all descriptors. */
4328         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4329         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4330         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4331         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4332
4333         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4334         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4335             (tp->dev->mtu > ETH_DATA_LEN))
4336                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4337
4338         /* Initialize the invariants of the rings; we only set this
4339          * stuff once.  This works because the card does not
4340          * write into the rx buffer posting rings.
4341          */
4342         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4343                 struct tg3_rx_buffer_desc *rxd;
4344
4345                 rxd = &tp->rx_std[i];
4346                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4347                         << RXD_LEN_SHIFT;
4348                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4349                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4350                                (i << RXD_OPAQUE_INDEX_SHIFT));
4351         }
4352
4353         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4354                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4355                         struct tg3_rx_buffer_desc *rxd;
4356
4357                         rxd = &tp->rx_jumbo[i];
4358                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4359                                 << RXD_LEN_SHIFT;
4360                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4361                                 RXD_FLAG_JUMBO;
4362                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4363                                (i << RXD_OPAQUE_INDEX_SHIFT));
4364                 }
4365         }
4366
4367         /* Now allocate fresh SKBs for each rx ring. */
4368         for (i = 0; i < tp->rx_pending; i++) {
4369                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4370                         printk(KERN_WARNING PFX
4371                                "%s: Using a smaller RX standard ring, "
4372                                "only %d out of %d buffers were allocated "
4373                                "successfully.\n",
4374                                tp->dev->name, i, tp->rx_pending);
4375                         if (i == 0)
4376                                 return -ENOMEM;
4377                         tp->rx_pending = i;
4378                         break;
4379                 }
4380         }
4381
4382         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4383                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4384                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4385                                              -1, i) < 0) {
4386                                 printk(KERN_WARNING PFX
4387                                        "%s: Using a smaller RX jumbo ring, "
4388                                        "only %d out of %d buffers were "
4389                                        "allocated successfully.\n",
4390                                        tp->dev->name, i, tp->rx_jumbo_pending);
4391                                 if (i == 0) {
4392                                         tg3_free_rings(tp);
4393                                         return -ENOMEM;
4394                                 }
4395                                 tp->rx_jumbo_pending = i;
4396                                 break;
4397                         }
4398                 }
4399         }
4400         return 0;
4401 }
4402
4403 /*
4404  * Must not be invoked with interrupt sources disabled and
4405  * the hardware shut down.
4406  */
4407 static void tg3_free_consistent(struct tg3 *tp)
4408 {
4409         kfree(tp->rx_std_buffers);
4410         tp->rx_std_buffers = NULL;
4411         if (tp->rx_std) {
4412                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4413                                     tp->rx_std, tp->rx_std_mapping);
4414                 tp->rx_std = NULL;
4415         }
4416         if (tp->rx_jumbo) {
4417                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4418                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4419                 tp->rx_jumbo = NULL;
4420         }
4421         if (tp->rx_rcb) {
4422                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4423                                     tp->rx_rcb, tp->rx_rcb_mapping);
4424                 tp->rx_rcb = NULL;
4425         }
4426         if (tp->tx_ring) {
4427                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4428                         tp->tx_ring, tp->tx_desc_mapping);
4429                 tp->tx_ring = NULL;
4430         }
4431         if (tp->hw_status) {
4432                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4433                                     tp->hw_status, tp->status_mapping);
4434                 tp->hw_status = NULL;
4435         }
4436         if (tp->hw_stats) {
4437                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4438                                     tp->hw_stats, tp->stats_mapping);
4439                 tp->hw_stats = NULL;
4440         }
4441 }
4442
4443 /*
4444  * Must not be invoked with interrupt sources disabled and
4445  * the hardware shut down.  Can sleep.
4446  */
4447 static int tg3_alloc_consistent(struct tg3 *tp)
4448 {
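             /* A single allocation holds the bookkeeping arrays for the
              * standard RX, jumbo RX and TX rings; the jumbo and TX
              * pointers are carved out of it just below.
              */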
4449         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4450                                       (TG3_RX_RING_SIZE +
4451                                        TG3_RX_JUMBO_RING_SIZE)) +
4452                                      (sizeof(struct tx_ring_info) *
4453                                       TG3_TX_RING_SIZE),
4454                                      GFP_KERNEL);
4455         if (!tp->rx_std_buffers)
4456                 return -ENOMEM;
4457
4458         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4459         tp->tx_buffers = (struct tx_ring_info *)
4460                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4461
4462         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4463                                           &tp->rx_std_mapping);
4464         if (!tp->rx_std)
4465                 goto err_out;
4466
4467         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4468                                             &tp->rx_jumbo_mapping);
4469
4470         if (!tp->rx_jumbo)
4471                 goto err_out;
4472
4473         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4474                                           &tp->rx_rcb_mapping);
4475         if (!tp->rx_rcb)
4476                 goto err_out;
4477
4478         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4479                                            &tp->tx_desc_mapping);
4480         if (!tp->tx_ring)
4481                 goto err_out;
4482
4483         tp->hw_status = pci_alloc_consistent(tp->pdev,
4484                                              TG3_HW_STATUS_SIZE,
4485                                              &tp->status_mapping);
4486         if (!tp->hw_status)
4487                 goto err_out;
4488
4489         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4490                                             sizeof(struct tg3_hw_stats),
4491                                             &tp->stats_mapping);
4492         if (!tp->hw_stats)
4493                 goto err_out;
4494
4495         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4496         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4497
4498         return 0;
4499
4500 err_out:
4501         tg3_free_consistent(tp);
4502         return -ENOMEM;
4503 }
4504
4505 #define MAX_WAIT_CNT 1000
4506
4507 /* To stop a block, clear the enable bit and poll till it
4508  * clears.  tp->lock is held.
4509  */
4510 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4511 {
4512         unsigned int i;
4513         u32 val;
4514
4515         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4516                 switch (ofs) {
4517                 case RCVLSC_MODE:
4518                 case DMAC_MODE:
4519                 case MBFREE_MODE:
4520                 case BUFMGR_MODE:
4521                 case MEMARB_MODE:
4522                         /* We can't enable/disable these bits on the
4523                          * 5705/5750, so just report success.
4524                          */
4525                         return 0;
4526
4527                 default:
4528                         break;
4529                 }
4530         }
4531
4532         val = tr32(ofs);
4533         val &= ~enable_bit;
4534         tw32_f(ofs, val);
4535
4536         for (i = 0; i < MAX_WAIT_CNT; i++) {
4537                 udelay(100);
4538                 val = tr32(ofs);
4539                 if ((val & enable_bit) == 0)
4540                         break;
4541         }
4542
4543         if (i == MAX_WAIT_CNT && !silent) {
4544                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4545                        "ofs=%lx enable_bit=%x\n",
4546                        ofs, enable_bit);
4547                 return -ENODEV;
4548         }
4549
4550         return 0;
4551 }
4552
4553 /* tp->lock is held. */
4554 static int tg3_abort_hw(struct tg3 *tp, int silent)
4555 {
4556         int i, err;
4557
4558         tg3_disable_ints(tp);
4559
4560         tp->rx_mode &= ~RX_MODE_ENABLE;
4561         tw32_f(MAC_RX_MODE, tp->rx_mode);
4562         udelay(10);
4563
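             /* Stop the receive-side blocks, then the transmit and DMA
              * blocks, clearing each enable bit and polling until it reads
              * back clear.
              */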
4564         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4565         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4566         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4567         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4568         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4569         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4570
4571         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4572         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4573         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4574         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4575         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4576         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4577         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4578
4579         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4580         tw32_f(MAC_MODE, tp->mac_mode);
4581         udelay(40);
4582
4583         tp->tx_mode &= ~TX_MODE_ENABLE;
4584         tw32_f(MAC_TX_MODE, tp->tx_mode);
4585
4586         for (i = 0; i < MAX_WAIT_CNT; i++) {
4587                 udelay(100);
4588                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4589                         break;
4590         }
4591         if (i >= MAX_WAIT_CNT) {
4592                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4593                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4594                        tp->dev->name, tr32(MAC_TX_MODE));
4595                 err |= -ENODEV;
4596         }
4597
4598         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4599         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4600         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4601
4602         tw32(FTQ_RESET, 0xffffffff);
4603         tw32(FTQ_RESET, 0x00000000);
4604
4605         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4606         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4607
4608         if (tp->hw_status)
4609                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4610         if (tp->hw_stats)
4611                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4612
4613         return err;
4614 }
4615
4616 /* tp->lock is held. */
4617 static int tg3_nvram_lock(struct tg3 *tp)
4618 {
4619         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4620                 int i;
4621
4622                 if (tp->nvram_lock_cnt == 0) {
4623                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4624                         for (i = 0; i < 8000; i++) {
4625                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4626                                         break;
4627                                 udelay(20);
4628                         }
4629                         if (i == 8000) {
4630                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4631                                 return -ENODEV;
4632                         }
4633                 }
4634                 tp->nvram_lock_cnt++;
4635         }
4636         return 0;
4637 }
4638
4639 /* tp->lock is held. */
4640 static void tg3_nvram_unlock(struct tg3 *tp)
4641 {
4642         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4643                 if (tp->nvram_lock_cnt > 0)
4644                         tp->nvram_lock_cnt--;
4645                 if (tp->nvram_lock_cnt == 0)
4646                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4647         }
4648 }
4649
4650 /* tp->lock is held. */
4651 static void tg3_enable_nvram_access(struct tg3 *tp)
4652 {
4653         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4654             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4655                 u32 nvaccess = tr32(NVRAM_ACCESS);
4656
4657                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4658         }
4659 }
4660
4661 /* tp->lock is held. */
4662 static void tg3_disable_nvram_access(struct tg3 *tp)
4663 {
4664         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4665             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4666                 u32 nvaccess = tr32(NVRAM_ACCESS);
4667
4668                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4669         }
4670 }
4671
4672 /* tp->lock is held. */
4673 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4674 {
4675         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4676                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4677
4678         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4679                 switch (kind) {
4680                 case RESET_KIND_INIT:
4681                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4682                                       DRV_STATE_START);
4683                         break;
4684
4685                 case RESET_KIND_SHUTDOWN:
4686                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4687                                       DRV_STATE_UNLOAD);
4688                         break;
4689
4690                 case RESET_KIND_SUSPEND:
4691                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4692                                       DRV_STATE_SUSPEND);
4693                         break;
4694
4695                 default:
4696                         break;
4697                 }
4698         }
4699 }
4700
4701 /* tp->lock is held. */
4702 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4703 {
4704         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4705                 switch (kind) {
4706                 case RESET_KIND_INIT:
4707                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4708                                       DRV_STATE_START_DONE);
4709                         break;
4710
4711                 case RESET_KIND_SHUTDOWN:
4712                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4713                                       DRV_STATE_UNLOAD_DONE);
4714                         break;
4715
4716                 default:
4717                         break;
4718                 }
4719         }
4720 }
4721
4722 /* tp->lock is held. */
4723 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4724 {
4725         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4726                 switch (kind) {
4727                 case RESET_KIND_INIT:
4728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4729                                       DRV_STATE_START);
4730                         break;
4731
4732                 case RESET_KIND_SHUTDOWN:
4733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4734                                       DRV_STATE_UNLOAD);
4735                         break;
4736
4737                 case RESET_KIND_SUSPEND:
4738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4739                                       DRV_STATE_SUSPEND);
4740                         break;
4741
4742                 default:
4743                         break;
4744                 }
4745         }
4746 }
4747
4748 static int tg3_poll_fw(struct tg3 *tp)
4749 {
4750         int i;
4751         u32 val;
4752
4753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4754                 /* Wait up to 20ms for init done. */
4755                 for (i = 0; i < 200; i++) {
4756                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4757                                 return 0;
4758                         udelay(100);
4759                 }
4760                 return -ENODEV;
4761         }
4762
4763         /* Wait for firmware initialization to complete. */
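             /* The bootcode writes back the one's complement of the magic
              * value once it has finished initializing.
              */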
4764         for (i = 0; i < 100000; i++) {
4765                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4766                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4767                         break;
4768                 udelay(10);
4769         }
4770
4771         /* Chip might not be fitted with firmware.  Some Sun onboard
4772          * parts are configured like that.  So don't signal the timeout
4773          * of the above loop as an error, but do report the lack of
4774          * running firmware once.
4775          */
4776         if (i >= 100000 &&
4777             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4778                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4779
4780                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4781                        tp->dev->name);
4782         }
4783
4784         return 0;
4785 }
4786
4787 static void tg3_stop_fw(struct tg3 *);
4788
4789 /* tp->lock is held. */
4790 static int tg3_chip_reset(struct tg3 *tp)
4791 {
4792         u32 val;
4793         void (*write_op)(struct tg3 *, u32, u32);
4794         int err;
4795
4796         tg3_nvram_lock(tp);
4797
4798         /* No matching tg3_nvram_unlock() after this because
4799          * chip reset below will undo the nvram lock.
4800          */
4801         tp->nvram_lock_cnt = 0;
4802
4803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4804             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4805             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4806                 tw32(GRC_FASTBOOT_PC, 0);
4807
4808         /*
4809          * We must avoid the readl() that normally takes place.
4810          * It locks up machines, causes machine checks, and other
4811          * fun things.  So, temporarily disable the 5701
4812          * hardware workaround while we do the reset.
4813          */
4814         write_op = tp->write32;
4815         if (write_op == tg3_write_flush_reg32)
4816                 tp->write32 = tg3_write32;
4817
4818         /* do the reset */
4819         val = GRC_MISC_CFG_CORECLK_RESET;
4820
4821         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4822                 if (tr32(0x7e2c) == 0x60) {
4823                         tw32(0x7e2c, 0x20);
4824                 }
4825                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4826                         tw32(GRC_MISC_CFG, (1 << 29));
4827                         val |= (1 << 29);
4828                 }
4829         }
4830
4831         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4832                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
4833                 tw32(GRC_VCPU_EXT_CTRL,
4834                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
4835         }
4836
4837         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4838                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4839         tw32(GRC_MISC_CFG, val);
4840
4841         /* restore 5701 hardware bug workaround write method */
4842         tp->write32 = write_op;
4843
4844         /* Unfortunately, we have to delay before the PCI read back.
4845          * Some 575X chips will not even respond to a PCI cfg access
4846          * when the reset command is given to the chip.
4847          *
4848          * How do these hardware designers expect things to work
4849          * properly if the PCI write is posted for a long period
4850          * of time?  It is always necessary to have some method by
4851          * which a register read back can occur to push out the
4852          * write that does the reset.
4853          *
4854          * For most tg3 variants the trick below has worked.
4855          * Ho hum...
4856          */
4857         udelay(120);
4858
4859         /* Flush PCI posted writes.  The normal MMIO registers
4860          * are inaccessible at this time so this is the only
4861          * way to do this reliably (actually, this is no longer
4862          * the case; see above).  I tried to use indirect
4863          * register read/write, but this upset some 5701 variants.
4864          */
4865         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4866
4867         udelay(120);
4868
4869         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4870                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4871                         int i;
4872                         u32 cfg_val;
4873
4874                         /* Wait for link training to complete.  */
4875                         for (i = 0; i < 5000; i++)
4876                                 udelay(100);
4877
4878                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4879                         pci_write_config_dword(tp->pdev, 0xc4,
4880                                                cfg_val | (1 << 15));
4881                 }
4882                 /* Set PCIE max payload size and clear error status.  */
4883                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4884         }
4885
4886         /* Re-enable indirect register accesses. */
4887         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4888                                tp->misc_host_ctrl);
4889
4890         /* Set MAX PCI retry to zero. */
4891         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4892         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4893             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4894                 val |= PCISTATE_RETRY_SAME_DMA;
4895         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4896
4897         pci_restore_state(tp->pdev);
4898
4899         /* Make sure PCI-X relaxed ordering bit is clear. */
4900         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4901         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4902         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4903
4904         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4905                 u32 val;
4906
4907                 /* Chip reset on 5780 will reset MSI enable bit,
4908                  * so we need to restore it.
4909                  */
4910                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4911                         u16 ctrl;
4912
4913                         pci_read_config_word(tp->pdev,
4914                                              tp->msi_cap + PCI_MSI_FLAGS,
4915                                              &ctrl);
4916                         pci_write_config_word(tp->pdev,
4917                                               tp->msi_cap + PCI_MSI_FLAGS,
4918                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4919                         val = tr32(MSGINT_MODE);
4920                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4921                 }
4922
4923                 val = tr32(MEMARB_MODE);
4924                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4925
4926         } else
4927                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4928
4929         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4930                 tg3_stop_fw(tp);
4931                 tw32(0x5000, 0x400);
4932         }
4933
4934         tw32(GRC_MODE, tp->grc_mode);
4935
4936         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4937                 u32 val = tr32(0xc4);
4938
4939                 tw32(0xc4, val | (1 << 15));
4940         }
4941
4942         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4943             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4944                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4945                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4946                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4947                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4948         }
4949
4950         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4951                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4952                 tw32_f(MAC_MODE, tp->mac_mode);
4953         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4954                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4955                 tw32_f(MAC_MODE, tp->mac_mode);
4956         } else
4957                 tw32_f(MAC_MODE, 0);
4958         udelay(40);
4959
4960         err = tg3_poll_fw(tp);
4961         if (err)
4962                 return err;
4963
4964         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4965             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4966                 u32 val = tr32(0x7c00);
4967
4968                 tw32(0x7c00, val | (1 << 25));
4969         }
4970
4971         /* Reprobe ASF enable state.  */
4972         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4973         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4974         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4975         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4976                 u32 nic_cfg;
4977
4978                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4979                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4980                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4981                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4982                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4983                 }
4984         }
4985
4986         return 0;
4987 }
4988
4989 /* tp->lock is held. */
4990 static void tg3_stop_fw(struct tg3 *tp)
4991 {
4992         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4993                 u32 val;
4994                 int i;
4995
4996                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4997                 val = tr32(GRC_RX_CPU_EVENT);
4998                 val |= (1 << 14);
4999                 tw32(GRC_RX_CPU_EVENT, val);
5000
5001                 /* Wait for RX cpu to ACK the event.  */
5002                 for (i = 0; i < 100; i++) {
5003                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5004                                 break;
5005                         udelay(1);
5006                 }
5007         }
5008 }
5009
5010 /* tp->lock is held. */
5011 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5012 {
5013         int err;
5014
5015         tg3_stop_fw(tp);
5016
5017         tg3_write_sig_pre_reset(tp, kind);
5018
5019         tg3_abort_hw(tp, silent);
5020         err = tg3_chip_reset(tp);
5021
5022         tg3_write_sig_legacy(tp, kind);
5023         tg3_write_sig_post_reset(tp, kind);
5024
5025         if (err)
5026                 return err;
5027
5028         return 0;
5029 }
5030
5031 #define TG3_FW_RELEASE_MAJOR    0x0
5032 #define TG3_FW_RELASE_MINOR     0x0
5033 #define TG3_FW_RELEASE_FIX      0x0
5034 #define TG3_FW_START_ADDR       0x08000000
5035 #define TG3_FW_TEXT_ADDR        0x08000000
5036 #define TG3_FW_TEXT_LEN         0x9c0
5037 #define TG3_FW_RODATA_ADDR      0x080009c0
5038 #define TG3_FW_RODATA_LEN       0x60
5039 #define TG3_FW_DATA_ADDR        0x08000a40
5040 #define TG3_FW_DATA_LEN         0x20
5041 #define TG3_FW_SBSS_ADDR        0x08000a60
5042 #define TG3_FW_SBSS_LEN         0xc
5043 #define TG3_FW_BSS_ADDR         0x08000a70
5044 #define TG3_FW_BSS_LEN          0x10
5045
5046 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5047         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5048         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5049         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5050         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5051         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5052         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5053         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5054         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5055         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5056         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5057         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5058         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5059         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5060         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5061         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5062         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5063         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5064         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5065         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5066         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5067         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5068         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5069         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5070         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5071         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5072         0, 0, 0, 0, 0, 0,
5073         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5074         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5075         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5076         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5077         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5078         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5079         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5080         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5081         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5082         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5083         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5084         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5085         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5086         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5087         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5088         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5089         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5090         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5091         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5092         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5093         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5094         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5095         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5096         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5097         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5098         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5099         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5100         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5101         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5102         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5103         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5104         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5105         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5106         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5107         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5108         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5109         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5110         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5111         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5112         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5113         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5114         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5115         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5116         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5117         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5118         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5119         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5120         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5121         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5122         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5123         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5124         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5125         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5126         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5127         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5128         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5129         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5130         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5131         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5132         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5133         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5134         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5135         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5136         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5137         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5138 };
5139
5140 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5141         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5142         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5143         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5144         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5145         0x00000000
5146 };
5147
5148 #if 0 /* All zeros, don't eat up space with it. */
5149 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5150         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5151         0x00000000, 0x00000000, 0x00000000, 0x00000000
5152 };
5153 #endif
5154
5155 #define RX_CPU_SCRATCH_BASE     0x30000
5156 #define RX_CPU_SCRATCH_SIZE     0x04000
5157 #define TX_CPU_SCRATCH_BASE     0x34000
5158 #define TX_CPU_SCRATCH_SIZE     0x04000
5159
5160 /* tp->lock is held. */
5161 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5162 {
5163         int i;
5164
5165         BUG_ON(offset == TX_CPU_BASE &&
5166             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5167
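             /* The 5906 is halted through the VCPU external control
              * register rather than the per-CPU state/mode registers used
              * below.
              */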
5168         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5169                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5170
5171                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5172                 return 0;
5173         }
5174         if (offset == RX_CPU_BASE) {
5175                 for (i = 0; i < 10000; i++) {
5176                         tw32(offset + CPU_STATE, 0xffffffff);
5177                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5178                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5179                                 break;
5180                 }
5181
5182                 tw32(offset + CPU_STATE, 0xffffffff);
5183                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5184                 udelay(10);
5185         } else {
5186                 for (i = 0; i < 10000; i++) {
5187                         tw32(offset + CPU_STATE, 0xffffffff);
5188                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5189                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5190                                 break;
5191                 }
5192         }
5193
5194         if (i >= 10000) {
5195                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5196                        "and %s CPU\n",
5197                        tp->dev->name,
5198                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5199                 return -ENODEV;
5200         }
5201
5202         /* Clear firmware's nvram arbitration. */
5203         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5204                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5205         return 0;
5206 }
5207
5208 struct fw_info {
5209         unsigned int text_base;
5210         unsigned int text_len;
5211         const u32 *text_data;
5212         unsigned int rodata_base;
5213         unsigned int rodata_len;
5214         const u32 *rodata_data;
5215         unsigned int data_base;
5216         unsigned int data_len;
5217         const u32 *data_data;
5218 };
5219
5220 /* tp->lock is held. */
5221 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5222                                  int cpu_scratch_size, struct fw_info *info)
5223 {
5224         int err, lock_err, i;
5225         void (*write_op)(struct tg3 *, u32, u32);
5226
5227         if (cpu_base == TX_CPU_BASE &&
5228             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5229                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5230                        "TX cpu firmware on %s which is 5705.\n",
5231                        tp->dev->name);
5232                 return -EINVAL;
5233         }
5234
5235         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5236                 write_op = tg3_write_mem;
5237         else
5238                 write_op = tg3_write_indirect_reg32;
5239
5240         /* It is possible that the bootcode is still loading at this point.
5241          * Get the nvram lock before halting the cpu.
5242          */
5243         lock_err = tg3_nvram_lock(tp);
5244         err = tg3_halt_cpu(tp, cpu_base);
5245         if (!lock_err)
5246                 tg3_nvram_unlock(tp);
5247         if (err)
5248                 goto out;
5249
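             /* Zero the scratch area, keep the CPU halted, and copy the
              * firmware text, rodata and data sections into on-chip
              * scratch memory one word at a time.
              */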
5250         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5251                 write_op(tp, cpu_scratch_base + i, 0);
5252         tw32(cpu_base + CPU_STATE, 0xffffffff);
5253         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5254         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5255                 write_op(tp, (cpu_scratch_base +
5256                               (info->text_base & 0xffff) +
5257                               (i * sizeof(u32))),
5258                          (info->text_data ?
5259                           info->text_data[i] : 0));
5260         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5261                 write_op(tp, (cpu_scratch_base +
5262                               (info->rodata_base & 0xffff) +
5263                               (i * sizeof(u32))),
5264                          (info->rodata_data ?
5265                           info->rodata_data[i] : 0));
5266         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5267                 write_op(tp, (cpu_scratch_base +
5268                               (info->data_base & 0xffff) +
5269                               (i * sizeof(u32))),
5270                          (info->data_data ?
5271                           info->data_data[i] : 0));
5272
5273         err = 0;
5274
5275 out:
5276         return err;
5277 }
5278
5279 /* tp->lock is held. */
5280 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5281 {
5282         struct fw_info info;
5283         int err, i;
5284
5285         info.text_base = TG3_FW_TEXT_ADDR;
5286         info.text_len = TG3_FW_TEXT_LEN;
5287         info.text_data = &tg3FwText[0];
5288         info.rodata_base = TG3_FW_RODATA_ADDR;
5289         info.rodata_len = TG3_FW_RODATA_LEN;
5290         info.rodata_data = &tg3FwRodata[0];
5291         info.data_base = TG3_FW_DATA_ADDR;
5292         info.data_len = TG3_FW_DATA_LEN;
5293         info.data_data = NULL;
5294
5295         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5296                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5297                                     &info);
5298         if (err)
5299                 return err;
5300
5301         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5302                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5303                                     &info);
5304         if (err)
5305                 return err;
5306
5307         /* Now start up only the RX cpu. */
5308         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5309         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5310
5311         for (i = 0; i < 5; i++) {
5312                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5313                         break;
5314                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5315                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5316                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5317                 udelay(1000);
5318         }
5319         if (i >= 5) {
5320                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5321                        "to set RX CPU PC, is %08x should be %08x\n",
5322                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5323                        TG3_FW_TEXT_ADDR);
5324                 return -ENODEV;
5325         }
5326         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5327         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5328
5329         return 0;
5330 }
5331
5332 #if TG3_TSO_SUPPORT != 0
5333
5334 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5335 #define TG3_TSO_FW_RELASE_MINOR         0x6
5336 #define TG3_TSO_FW_RELEASE_FIX          0x0
5337 #define TG3_TSO_FW_START_ADDR           0x08000000
5338 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5339 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5340 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5341 #define TG3_TSO_FW_RODATA_LEN           0x60
5342 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5343 #define TG3_TSO_FW_DATA_LEN             0x30
5344 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5345 #define TG3_TSO_FW_SBSS_LEN             0x2c
5346 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5347 #define TG3_TSO_FW_BSS_LEN              0x894
5348
5349 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5350         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5351         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5352         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5353         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5354         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5355         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5356         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5357         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5358         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5359         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5360         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5361         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5362         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5363         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5364         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5365         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5366         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5367         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5368         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5369         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5370         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5371         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5372         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5373         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5374         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5375         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5376         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5377         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5378         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5379         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5380         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5381         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5382         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5383         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5384         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5385         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5386         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5387         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5388         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5389         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5390         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5391         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5392         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5393         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5394         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5395         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5396         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5397         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5398         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5399         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5400         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5401         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5402         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5403         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5404         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5405         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5406         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5407         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5408         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5409         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5410         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5411         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5412         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5413         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5414         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5415         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5416         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5417         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5418         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5419         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5420         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5421         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5422         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5423         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5424         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5425         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5426         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5427         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5428         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5429         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5430         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5431         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5432         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5433         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5434         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5435         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5436         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5437         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5438         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5439         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5440         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5441         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5442         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5443         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5444         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5445         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5446         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5447         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5448         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5449         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5450         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5451         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5452         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5453         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5454         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5455         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5456         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5457         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5458         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5459         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5460         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5461         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5462         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5463         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5464         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5465         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5466         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5467         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5468         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5469         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5470         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5471         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5472         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5473         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5474         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5475         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5476         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5477         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5478         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5479         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5480         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5481         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5482         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5483         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5484         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5485         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5486         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5487         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5488         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5489         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5490         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5491         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5492         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5493         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5494         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5495         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5496         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5497         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5498         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5499         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5500         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5501         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5502         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5503         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5504         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5505         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5506         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5507         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5508         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5509         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5510         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5511         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5512         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5513         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5514         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5515         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5516         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5517         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5518         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5519         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5520         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5521         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5522         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5523         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5524         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5525         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5526         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5527         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5528         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5529         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5530         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5531         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5532         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5533         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5534         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5535         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5536         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5537         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5538         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5539         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5540         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5541         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5542         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5543         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5544         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5545         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5546         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5547         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5548         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5549         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5550         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5551         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5552         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5553         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5554         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5555         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5556         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5557         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5558         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5559         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5560         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5561         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5562         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5563         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5564         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5565         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5566         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5567         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5568         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5569         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5570         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5571         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5572         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5573         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5574         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5575         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5576         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5577         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5578         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5579         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5580         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5581         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5582         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5583         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5584         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5585         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5586         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5587         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5588         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5589         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5590         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5591         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5592         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5593         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5594         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5595         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5596         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5597         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5598         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5599         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5600         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5601         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5602         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5603         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5604         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5605         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5606         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5607         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5608         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5609         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5610         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5611         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5612         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5613         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5614         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5615         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5616         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5617         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5618         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5619         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5620         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5621         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5622         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5623         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5624         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5625         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5626         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5627         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5628         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5629         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5630         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5631         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5632         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5633         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5634 };
5635
5636 static const u32 tg3TsoFwRodata[] = {
5637         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5638         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5639         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5640         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5641         0x00000000,
5642 };
5643
5644 static const u32 tg3TsoFwData[] = {
5645         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5646         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5647         0x00000000,
5648 };
5649
5650 /* 5705 needs a special version of the TSO firmware.  */
5651 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5652 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5653 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5654 #define TG3_TSO5_FW_START_ADDR          0x00010000
5655 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5656 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5657 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5658 #define TG3_TSO5_FW_RODATA_LEN          0x50
5659 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5660 #define TG3_TSO5_FW_DATA_LEN            0x20
5661 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5662 #define TG3_TSO5_FW_SBSS_LEN            0x28
5663 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5664 #define TG3_TSO5_FW_BSS_LEN             0x88
5665
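/* 5705-specific TSO firmware image; tg3_load_tso_firmware() places it in
 * the RX CPU mbuf pool scratch area instead of the TX CPU scratch space.
 */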
5666 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5667         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5668         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5669         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5670         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5671         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5672         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5673         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5674         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5675         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5676         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5677         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5678         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5679         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5680         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5681         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5682         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5683         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5684         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5685         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5686         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5687         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5688         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5689         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5690         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5691         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5692         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5693         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5694         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5695         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5696         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5697         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5698         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5699         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5700         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5701         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5702         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5703         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5704         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5705         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5706         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5707         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5708         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5709         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5710         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5711         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5712         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5713         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5714         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5715         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5716         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5717         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5718         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5719         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5720         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5721         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5722         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5723         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5724         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5725         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5726         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5727         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5728         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5729         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5730         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5731         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5732         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5733         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5734         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5735         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5736         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5737         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5738         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5739         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5740         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5741         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5742         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5743         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5744         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5745         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5746         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5747         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5748         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5749         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5750         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5751         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5752         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5753         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5754         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5755         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5756         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5757         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5758         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5759         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5760         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5761         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5762         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5763         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5764         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5765         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5766         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5767         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5768         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5769         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5770         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5771         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5772         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5773         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5774         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5775         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5776         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5777         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5778         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5779         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5780         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5781         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5782         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5783         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5784         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5785         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5786         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5787         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5788         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5789         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5790         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5791         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5792         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5793         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5794         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5795         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5796         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5797         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5798         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5799         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5800         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5801         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5802         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5803         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5804         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5805         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5806         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5807         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5808         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5809         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5810         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5811         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5812         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5813         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5814         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5815         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5816         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5817         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5818         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5819         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5820         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5821         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5822         0x00000000, 0x00000000, 0x00000000,
5823 };
5824
5825 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5826         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5827         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5828         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5829         0x00000000, 0x00000000, 0x00000000,
5830 };
5831
5832 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5833         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5834         0x00000000, 0x00000000, 0x00000000,
5835 };
5836
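/* Select and download the appropriate TSO firmware image, then start the
 * offload CPU at its entry point.  Chips with hardware TSO
 * (TG3_FLG2_HW_TSO) need no firmware; the 5705 gets its smaller image
 * loaded into the RX CPU mbuf pool, while every other TSO-capable chip
 * uses the TX CPU scratch area.
 */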
5837 /* tp->lock is held. */
5838 static int tg3_load_tso_firmware(struct tg3 *tp)
5839 {
5840         struct fw_info info;
5841         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5842         int err, i;
5843
5844         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5845                 return 0;
5846
5847         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5848                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5849                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5850                 info.text_data = &tg3Tso5FwText[0];
5851                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5852                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5853                 info.rodata_data = &tg3Tso5FwRodata[0];
5854                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5855                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5856                 info.data_data = &tg3Tso5FwData[0];
5857                 cpu_base = RX_CPU_BASE;
5858                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5859                 cpu_scratch_size = (info.text_len +
5860                                     info.rodata_len +
5861                                     info.data_len +
5862                                     TG3_TSO5_FW_SBSS_LEN +
5863                                     TG3_TSO5_FW_BSS_LEN);
5864         } else {
5865                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5866                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5867                 info.text_data = &tg3TsoFwText[0];
5868                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5869                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5870                 info.rodata_data = &tg3TsoFwRodata[0];
5871                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5872                 info.data_len = TG3_TSO_FW_DATA_LEN;
5873                 info.data_data = &tg3TsoFwData[0];
5874                 cpu_base = TX_CPU_BASE;
5875                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5876                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5877         }
5878
5879         err = tg3_load_firmware_cpu(tp, cpu_base,
5880                                     cpu_scratch_base, cpu_scratch_size,
5881                                     &info);
5882         if (err)
5883                 return err;
5884
5885         /* Now start up the CPU. */
5886         tw32(cpu_base + CPU_STATE, 0xffffffff);
5887         tw32_f(cpu_base + CPU_PC,    info.text_base);
5888
5889         for (i = 0; i < 5; i++) {
5890                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5891                         break;
5892                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5893                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5894                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5895                 udelay(1000);
5896         }
5897         if (i >= 5) {
5898                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5899                        "to set CPU PC, is %08x should be %08x\n",
5900                        tp->dev->name, tr32(cpu_base + CPU_PC),
5901                        info.text_base);
5902                 return -ENODEV;
5903         }
5904         tw32(cpu_base + CPU_STATE, 0xffffffff);
5905         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5906         return 0;
5907 }
5908
5909 #endif /* TG3_TSO_SUPPORT != 0 */
5910
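/* Program the station address into all four MAC address slots (plus the
 * twelve extended slots on 5703/5704) and derive the transmit backoff
 * seed from the address bytes.
 */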
5911 /* tp->lock is held. */
5912 static void __tg3_set_mac_addr(struct tg3 *tp)
5913 {
5914         u32 addr_high, addr_low;
5915         int i;
5916
5917         addr_high = ((tp->dev->dev_addr[0] << 8) |
5918                      tp->dev->dev_addr[1]);
5919         addr_low = ((tp->dev->dev_addr[2] << 24) |
5920                     (tp->dev->dev_addr[3] << 16) |
5921                     (tp->dev->dev_addr[4] <<  8) |
5922                     (tp->dev->dev_addr[5] <<  0));
5923         for (i = 0; i < 4; i++) {
5924                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5925                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5926         }
5927
5928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5930                 for (i = 0; i < 12; i++) {
5931                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5932                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5933                 }
5934         }
5935
5936         addr_high = (tp->dev->dev_addr[0] +
5937                      tp->dev->dev_addr[1] +
5938                      tp->dev->dev_addr[2] +
5939                      tp->dev->dev_addr[3] +
5940                      tp->dev->dev_addr[4] +
5941                      tp->dev->dev_addr[5]) &
5942                 TX_BACKOFF_SEED_MASK;
5943         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5944 }
5945
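/* Handler for changing the interface MAC address at runtime.  If ASF is
 * enabled the chip is reset so the firmware can re-learn its addresses;
 * otherwise the new address is written directly under tp->lock.
 */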
5946 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5947 {
5948         struct tg3 *tp = netdev_priv(dev);
5949         struct sockaddr *addr = p;
5950         int err = 0;
5951
5952         if (!is_valid_ether_addr(addr->sa_data))
5953                 return -EINVAL;
5954
5955         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5956
5957         if (!netif_running(dev))
5958                 return 0;
5959
5960         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5961                 /* Reset chip so that ASF can re-init any MAC addresses it
5962                  * needs.
5963                  */
5964                 tg3_netif_stop(tp);
5965                 tg3_full_lock(tp, 1);
5966
5967                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5968                 err = tg3_restart_hw(tp, 0);
5969                 if (!err)
5970                         tg3_netif_start(tp);
5971                 tg3_full_unlock(tp);
5972         } else {
5973                 spin_lock_bh(&tp->lock);
5974                 __tg3_set_mac_addr(tp);
5975                 spin_unlock_bh(&tp->lock);
5976         }
5977
5978         return err;
5979 }
5980
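/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host DMA address
 * of a ring, its maxlen/flags word and, on pre-5705 chips, the ring's
 * location in NIC memory.
 */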
5981 /* tp->lock is held. */
5982 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5983                            dma_addr_t mapping, u32 maxlen_flags,
5984                            u32 nic_addr)
5985 {
5986         tg3_write_mem(tp,
5987                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5988                       ((u64) mapping >> 32));
5989         tg3_write_mem(tp,
5990                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5991                       ((u64) mapping & 0xffffffff));
5992         tg3_write_mem(tp,
5993                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5994                        maxlen_flags);
5995
5996         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5997                 tg3_write_mem(tp,
5998                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5999                               nic_addr);
6000 }
6001
6002 static void __tg3_set_rx_mode(struct net_device *);
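/* Program the host coalescing engine from an ethtool_coalesce request
 * (typically arriving via the driver's ethtool set_coalesce hook).  The
 * per-interrupt tick registers and the statistics block tick are only
 * programmed on pre-5705 chips.
 */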
6003 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6004 {
6005         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6006         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6007         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6008         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6009         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6010                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6011                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6012         }
6013         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6014         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6015         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6016                 u32 val = ec->stats_block_coalesce_usecs;
6017
6018                 if (!netif_carrier_ok(tp->dev))
6019                         val = 0;
6020
6021                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6022         }
6023 }
6024
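/* Bring the chip from reset to an operational state: stop any running
 * firmware, reset the core, rebuild the rings, then reprogram the buffer
 * manager, DMA engines and MAC from scratch.
 */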
6025 /* tp->lock is held. */
6026 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6027 {
6028         u32 val, rdmac_mode;
6029         int i, err, limit;
6030
6031         tg3_disable_ints(tp);
6032
6033         tg3_stop_fw(tp);
6034
6035         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6036
6037         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6038                 tg3_abort_hw(tp, 1);
6039         }
6040
6041         if (reset_phy)
6042                 tg3_phy_reset(tp);
6043
6044         err = tg3_chip_reset(tp);
6045         if (err)
6046                 return err;
6047
6048         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6049
6050         /* This works around an issue with Athlon chipsets on
6051          * B3 tigon3 silicon.  This bit has no effect on any
6052          * other revision.  But do not set this on PCI Express
6053          * chips.
6054          */
6055         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6056                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6057         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6058
6059         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6060             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6061                 val = tr32(TG3PCI_PCISTATE);
6062                 val |= PCISTATE_RETRY_SAME_DMA;
6063                 tw32(TG3PCI_PCISTATE, val);
6064         }
6065
6066         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6067                 /* Enable some hw fixes.  */
6068                 val = tr32(TG3PCI_MSI_DATA);
6069                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6070                 tw32(TG3PCI_MSI_DATA, val);
6071         }
6072
6073         /* Descriptor ring init may make accesses to the
6074          * NIC SRAM area to setup the TX descriptors, so we
6075          * can only do this after the hardware has been
6076          * successfully reset.
6077          */
6078         err = tg3_init_rings(tp);
6079         if (err)
6080                 return err;
6081
6082         /* This value is determined during the probe time DMA
6083          * engine test, tg3_test_dma.
6084          */
6085         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6086
6087         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6088                           GRC_MODE_4X_NIC_SEND_RINGS |
6089                           GRC_MODE_NO_TX_PHDR_CSUM |
6090                           GRC_MODE_NO_RX_PHDR_CSUM);
6091         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6092
6093         /* Pseudo-header checksum is done by hardware logic and not
6094          * the offload processors, so make the chip do the pseudo-
6095          * header checksums on receive.  For transmit it is more
6096          * convenient to do the pseudo-header checksum in software
6097          * as Linux does that on transmit for us in all cases.
6098          */
6099         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6100
6101         tw32(GRC_MODE,
6102              tp->grc_mode |
6103              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6104
6105         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
6106         val = tr32(GRC_MISC_CFG);
6107         val &= ~0xff;
6108         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6109         tw32(GRC_MISC_CFG, val);
6110
6111         /* Initialize MBUF/DESC pool. */
6112         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6113                 /* Do nothing.  */
6114         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6115                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6116                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6117                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6118                 else
6119                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6120                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6121                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6122         }
6123 #if TG3_TSO_SUPPORT != 0
6124         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6125                 int fw_len;
6126
6127                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6128                           TG3_TSO5_FW_RODATA_LEN +
6129                           TG3_TSO5_FW_DATA_LEN +
6130                           TG3_TSO5_FW_SBSS_LEN +
6131                           TG3_TSO5_FW_BSS_LEN);
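                /* Round the firmware footprint up to a 128-byte boundary. */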
6132                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6133                 tw32(BUFMGR_MB_POOL_ADDR,
6134                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6135                 tw32(BUFMGR_MB_POOL_SIZE,
6136                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6137         }
6138 #endif
6139
6140         if (tp->dev->mtu <= ETH_DATA_LEN) {
6141                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6142                      tp->bufmgr_config.mbuf_read_dma_low_water);
6143                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6144                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6145                 tw32(BUFMGR_MB_HIGH_WATER,
6146                      tp->bufmgr_config.mbuf_high_water);
6147         } else {
6148                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6149                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6150                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6151                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6152                 tw32(BUFMGR_MB_HIGH_WATER,
6153                      tp->bufmgr_config.mbuf_high_water_jumbo);
6154         }
6155         tw32(BUFMGR_DMA_LOW_WATER,
6156              tp->bufmgr_config.dma_low_water);
6157         tw32(BUFMGR_DMA_HIGH_WATER,
6158              tp->bufmgr_config.dma_high_water);
6159
6160         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6161         for (i = 0; i < 2000; i++) {
6162                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6163                         break;
6164                 udelay(10);
6165         }
6166         if (i >= 2000) {
6167                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6168                        tp->dev->name);
6169                 return -ENODEV;
6170         }
6171
6172         /* Setup replenish threshold. */
6173         val = tp->rx_pending / 8;
6174         if (val == 0)
6175                 val = 1;
6176         else if (val > tp->rx_std_max_post)
6177                 val = tp->rx_std_max_post;
6178         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6179                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6180                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6181
6182                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6183                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6184         }
6185
6186         tw32(RCVBDI_STD_THRESH, val);
6187
6188         /* Initialize TG3_BDINFO's at:
6189          *  RCVDBDI_STD_BD:     standard eth size rx ring
6190          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6191          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6192          *
6193          * like so:
6194          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6195          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6196          *                              ring attribute flags
6197          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6198          *
6199          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6200          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6201          *
6202          * The size of each ring is fixed in the firmware, but the location is
6203          * configurable.
6204          */
6205         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6206              ((u64) tp->rx_std_mapping >> 32));
6207         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6208              ((u64) tp->rx_std_mapping & 0xffffffff));
6209         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6210              NIC_SRAM_RX_BUFFER_DESC);
6211
6212         /* Don't even try to program the JUMBO/MINI buffer descriptor
6213          * configs on 5705.
6214          */
6215         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6216                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6217                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6218         } else {
6219                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6220                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6221
6222                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6223                      BDINFO_FLAGS_DISABLED);
6224
6225                 /* Setup replenish threshold. */
6226                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6227
6228                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6229                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6230                              ((u64) tp->rx_jumbo_mapping >> 32));
6231                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6232                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6233                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6234                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6235                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6236                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6237                 } else {
6238                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6239                              BDINFO_FLAGS_DISABLED);
6240                 }
6241
6242         }
6243
6244         /* There is only one send ring on 5705/5750, no need to explicitly
6245          * disable the others.
6246          */
6247         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6248                 /* Clear out send RCB ring in SRAM. */
6249                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6250                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6251                                       BDINFO_FLAGS_DISABLED);
6252         }
6253
6254         tp->tx_prod = 0;
6255         tp->tx_cons = 0;
6256         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6257         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6258
6259         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6260                        tp->tx_desc_mapping,
6261                        (TG3_TX_RING_SIZE <<
6262                         BDINFO_FLAGS_MAXLEN_SHIFT),
6263                        NIC_SRAM_TX_BUFFER_DESC);
6264
6265         /* There is only one receive return ring on 5705/5750, no need
6266          * to explicitly disable the others.
6267          */
6268         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6269                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6270                      i += TG3_BDINFO_SIZE) {
6271                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6272                                       BDINFO_FLAGS_DISABLED);
6273                 }
6274         }
6275
6276         tp->rx_rcb_ptr = 0;
6277         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6278
6279         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6280                        tp->rx_rcb_mapping,
6281                        (TG3_RX_RCB_RING_SIZE(tp) <<
6282                         BDINFO_FLAGS_MAXLEN_SHIFT),
6283                        0);
6284
6285         tp->rx_std_ptr = tp->rx_pending;
6286         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6287                      tp->rx_std_ptr);
6288
6289         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6290                                                 tp->rx_jumbo_pending : 0;
6291         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6292                      tp->rx_jumbo_ptr);
6293
6294         /* Initialize MAC address and backoff seed. */
6295         __tg3_set_mac_addr(tp);
6296
6297         /* MTU + ethernet header + FCS + optional VLAN tag */
6298         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6299
6300         /* The slot time is changed by tg3_setup_phy if we
6301          * run at gigabit with half duplex.
6302          */
6303         tw32(MAC_TX_LENGTHS,
6304              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6305              (6 << TX_LENGTHS_IPG_SHIFT) |
6306              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6307
6308         /* Receive rules. */
6309         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6310         tw32(RCVLPC_CONFIG, 0x0181);
6311
6312         /* Calculate RDMAC_MODE setting early, we need it to determine
6313          * the RCVLPC_STATE_ENABLE mask.
6314          */
6315         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6316                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6317                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6318                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6319                       RDMAC_MODE_LNGREAD_ENAB);
6320         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6321                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6322
6323         /* If statement applies to 5705 and 5750 PCI devices only */
6324         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6325              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6326             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6327                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6328                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6329                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6330                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6331                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6332                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6333                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6334                 }
6335         }
6336
6337         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6338                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6339
6340 #if TG3_TSO_SUPPORT != 0
6341         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6342                 rdmac_mode |= (1 << 27);
6343 #endif
6344
6345         /* Receive/send statistics. */
6346         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6347                 val = tr32(RCVLPC_STATS_ENABLE);
6348                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6349                 tw32(RCVLPC_STATS_ENABLE, val);
6350         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6351                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6352                 val = tr32(RCVLPC_STATS_ENABLE);
6353                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6354                 tw32(RCVLPC_STATS_ENABLE, val);
6355         } else {
6356                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6357         }
6358         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6359         tw32(SNDDATAI_STATSENAB, 0xffffff);
6360         tw32(SNDDATAI_STATSCTRL,
6361              (SNDDATAI_SCTRL_ENABLE |
6362               SNDDATAI_SCTRL_FASTUPD));
6363
6364         /* Setup host coalescing engine. */
6365         tw32(HOSTCC_MODE, 0);
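             /* Wait (up to 2000 * 10us) for HOSTCC_MODE_ENABLE to clear so the
              * engine is quiescent before __tg3_set_coalesce() below rewrites
              * the coalescing parameters.
              */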
6366         for (i = 0; i < 2000; i++) {
6367                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6368                         break;
6369                 udelay(10);
6370         }
6371
6372         __tg3_set_coalesce(tp, &tp->coal);
6373
6374         /* set status block DMA address */
6375         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6376              ((u64) tp->status_mapping >> 32));
6377         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6378              ((u64) tp->status_mapping & 0xffffffff));
6379
6380         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6381                 /* Status/statistics block address.  See tg3_timer,
6382                  * the tg3_periodic_fetch_stats call there, and
6383                  * tg3_get_stats to see how this works for 5705/5750 chips.
6384                  */
6385                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6386                      ((u64) tp->stats_mapping >> 32));
6387                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6388                      ((u64) tp->stats_mapping & 0xffffffff));
6389                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6390                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6391         }
6392
6393         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6394
6395         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6396         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6397         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6398                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6399
6400         /* Clear statistics/status block in chip, and status block in ram. */
6401         for (i = NIC_SRAM_STATS_BLK;
6402              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6403              i += sizeof(u32)) {
6404                 tg3_write_mem(tp, i, 0);
6405                 udelay(40);
6406         }
6407         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6408
6409         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6410                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6411                 /* reset to prevent losing 1st rx packet intermittently */
6412                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6413                 udelay(10);
6414         }
6415
6416         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6417                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6418         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6419         udelay(40);
6420
6421         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6422          * If TG3_FLG2_IS_NIC is zero, we should read the
6423          * register to preserve the GPIO settings for LOMs. The GPIOs,
6424          * whether used as inputs or outputs, are set by boot code after
6425          * reset.
6426          */
6427         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6428                 u32 gpio_mask;
6429
6430                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6431                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6432                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6433
6434                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6435                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6436                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6437
6438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6439                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6440
6441                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6442
6443                 /* GPIO1 must be driven high for eeprom write protect */
6444                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6445                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6446                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6447         }
6448         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6449         udelay(100);
6450
6451         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6452         tp->last_tag = 0;
6453
6454         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6455                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6456                 udelay(40);
6457         }
6458
6459         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6460                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6461                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6462                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6463                WDMAC_MODE_LNGREAD_ENAB);
6464
6465         /* If statement applies to 5705 and 5750 PCI devices only */
6466         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6467              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6469                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6470                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6471                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6472                         /* nothing */
6473                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6474                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6475                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6476                         val |= WDMAC_MODE_RX_ACCEL;
6477                 }
6478         }
6479
6480         /* Enable host coalescing bug fix */
6481         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6482             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6483                 val |= (1 << 29);
6484
6485         tw32_f(WDMAC_MODE, val);
6486         udelay(40);
6487
6488         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6489                 val = tr32(TG3PCI_X_CAPS);
6490                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6491                         val &= ~PCIX_CAPS_BURST_MASK;
6492                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6493                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6494                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6495                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6496                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6497                                 val |= (tp->split_mode_max_reqs <<
6498                                         PCIX_CAPS_SPLIT_SHIFT);
6499                 }
6500                 tw32(TG3PCI_X_CAPS, val);
6501         }
6502
6503         tw32_f(RDMAC_MODE, rdmac_mode);
6504         udelay(40);
6505
6506         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6507         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6508                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6509         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6510         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6511         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6512         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6513         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6514 #if TG3_TSO_SUPPORT != 0
6515         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6516                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6517 #endif
6518         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6519         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6520
6521         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6522                 err = tg3_load_5701_a0_firmware_fix(tp);
6523                 if (err)
6524                         return err;
6525         }
6526
6527 #if TG3_TSO_SUPPORT != 0
6528         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6529                 err = tg3_load_tso_firmware(tp);
6530                 if (err)
6531                         return err;
6532         }
6533 #endif
6534
6535         tp->tx_mode = TX_MODE_ENABLE;
6536         tw32_f(MAC_TX_MODE, tp->tx_mode);
6537         udelay(100);
6538
6539         tp->rx_mode = RX_MODE_ENABLE;
6540         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6541                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6542
6543         tw32_f(MAC_RX_MODE, tp->rx_mode);
6544         udelay(10);
6545
6546         if (tp->link_config.phy_is_low_power) {
6547                 tp->link_config.phy_is_low_power = 0;
6548                 tp->link_config.speed = tp->link_config.orig_speed;
6549                 tp->link_config.duplex = tp->link_config.orig_duplex;
6550                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6551         }
6552
6553         tp->mi_mode = MAC_MI_MODE_BASE;
6554         tw32_f(MAC_MI_MODE, tp->mi_mode);
6555         udelay(80);
6556
6557         tw32(MAC_LED_CTRL, tp->led_ctrl);
6558
6559         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6560         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6561                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6562                 udelay(10);
6563         }
6564         tw32_f(MAC_RX_MODE, tp->rx_mode);
6565         udelay(10);
6566
6567         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6568                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6569                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6570                         /* Set drive transmission level to 1.2V only if
6571                          * the signal pre-emphasis bit is not set.  */
6572                         val = tr32(MAC_SERDES_CFG);
6573                         val &= 0xfffff000;
6574                         val |= 0x880;
6575                         tw32(MAC_SERDES_CFG, val);
6576                 }
6577                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6578                         tw32(MAC_SERDES_CFG, 0x616000);
6579         }
6580
6581         /* Prevent chip from dropping frames when flow control
6582          * is enabled.
6583          */
6584         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6585
6586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6587             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6588                 /* Use hardware link auto-negotiation */
6589                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6590         }
6591
6592         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6593             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6594                 u32 tmp;
6595
6596                 tmp = tr32(SERDES_RX_CTRL);
6597                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6598                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6599                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6600                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6601         }
6602
6603         err = tg3_setup_phy(tp, 0);
6604         if (err)
6605                 return err;
6606
6607         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6608             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6609                 u32 tmp;
6610
6611                 /* Clear CRC stats. */
6612                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6613                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6614                         tg3_readphy(tp, 0x14, &tmp);
6615                 }
6616         }
6617
6618         __tg3_set_rx_mode(tp->dev);
6619
6620         /* Initialize receive rules. */
6621         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6622         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6623         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6624         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6625
6626         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6627             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6628                 limit = 8;
6629         else
6630                 limit = 16;
6631         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6632                 limit -= 4;
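             /* Zero out the receive rules above the enabled limit.  Each case
              * below intentionally falls through to the next lower slot; rules
              * 3..0 are left alone (see the commented-out entries), and when
              * ASF is enabled the top four slots are skipped, presumably
              * because the firmware owns them.
              */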
6633         switch (limit) {
6634         case 16:
6635                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6636         case 15:
6637                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6638         case 14:
6639                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6640         case 13:
6641                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6642         case 12:
6643                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6644         case 11:
6645                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6646         case 10:
6647                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6648         case 9:
6649                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6650         case 8:
6651                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6652         case 7:
6653                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6654         case 6:
6655                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6656         case 5:
6657                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6658         case 4:
6659                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6660         case 3:
6661                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6662         case 2:
6663         case 1:
6664
6665         default:
6666                 break;
6667         }
6668
6669         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6670
6671         return 0;
6672 }
6673
6674 /* Called at device open time to get the chip ready for
6675  * packet processing.  Invoked with tp->lock held.
6676  */
6677 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6678 {
6679         int err;
6680
6681         /* Force the chip into D0. */
6682         err = tg3_set_power_state(tp, PCI_D0);
6683         if (err)
6684                 goto out;
6685
6686         tg3_switch_clocks(tp);
6687
6688         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6689
6690         err = tg3_reset_hw(tp, reset_phy);
6691
6692 out:
6693         return err;
6694 }
6695
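     /* Accumulate the 32-bit counter read from REG into the 64-bit stat
      * PSTAT, carrying into the high word whenever the low word wraps.
      */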
6696 #define TG3_STAT_ADD32(PSTAT, REG) \
6697 do {    u32 __val = tr32(REG); \
6698         (PSTAT)->low += __val; \
6699         if ((PSTAT)->low < __val) \
6700                 (PSTAT)->high += 1; \
6701 } while (0)
6702
6703 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6704 {
6705         struct tg3_hw_stats *sp = tp->hw_stats;
6706
6707         if (!netif_carrier_ok(tp->dev))
6708                 return;
6709
6710         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6711         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6712         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6713         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6714         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6715         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6716         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6717         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6718         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6719         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6720         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6721         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6722         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6723
6724         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6725         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6726         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6727         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6728         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6729         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6730         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6731         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6732         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6733         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6734         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6735         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6736         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6737         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6738
6739         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6740         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6741         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6742 }
6743
6744 static void tg3_timer(unsigned long __opaque)
6745 {
6746         struct tg3 *tp = (struct tg3 *) __opaque;
6747
6748         if (tp->irq_sync)
6749                 goto restart_timer;
6750
6751         spin_lock(&tp->lock);
6752
6753         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6754                 /* All of this garbage is because, when using non-tagged
6755                  * IRQ status, the mailbox/status_block protocol the chip
6756                  * uses with the cpu is race-prone.
6757                  */
6758                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6759                         tw32(GRC_LOCAL_CTRL,
6760                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6761                 } else {
6762                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6763                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6764                 }
6765
6766                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6767                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6768                         spin_unlock(&tp->lock);
6769                         schedule_work(&tp->reset_task);
6770                         return;
6771                 }
6772         }
6773
6774         /* This part only runs once per second. */
6775         if (!--tp->timer_counter) {
6776                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6777                         tg3_periodic_fetch_stats(tp);
6778
6779                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6780                         u32 mac_stat;
6781                         int phy_event;
6782
6783                         mac_stat = tr32(MAC_STATUS);
6784
6785                         phy_event = 0;
6786                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6787                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6788                                         phy_event = 1;
6789                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6790                                 phy_event = 1;
6791
6792                         if (phy_event)
6793                                 tg3_setup_phy(tp, 0);
6794                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6795                         u32 mac_stat = tr32(MAC_STATUS);
6796                         int need_setup = 0;
6797
6798                         if (netif_carrier_ok(tp->dev) &&
6799                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6800                                 need_setup = 1;
6801                         }
6802                 if (!netif_carrier_ok(tp->dev) &&
6803                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6804                                          MAC_STATUS_SIGNAL_DET))) {
6805                                 need_setup = 1;
6806                         }
6807                         if (need_setup) {
6808                                 if (!tp->serdes_counter) {
6809                                         tw32_f(MAC_MODE,
6810                                              (tp->mac_mode &
6811                                               ~MAC_MODE_PORT_MODE_MASK));
6812                                         udelay(40);
6813                                         tw32_f(MAC_MODE, tp->mac_mode);
6814                                         udelay(40);
6815                                 }
6816                                 tg3_setup_phy(tp, 0);
6817                         }
6818                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6819                         tg3_serdes_parallel_detect(tp);
6820
6821                 tp->timer_counter = tp->timer_multiplier;
6822         }
6823
6824         /* Heartbeat is only sent once every 2 seconds.
6825          *
6826          * The heartbeat is to tell the ASF firmware that the host
6827          * driver is still alive.  In the event that the OS crashes,
6828          * ASF needs to reset the hardware to free up the FIFO space
6829          * that may be filled with rx packets destined for the host.
6830          * If the FIFO is full, ASF will no longer function properly.
6831          *
6832          * Unintended resets have been reported on real-time kernels
6833          * where the timer doesn't run on time.  Netpoll will also have
6834          * the same problem.
6835          *
6836          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6837          * to check the ring condition when the heartbeat is expiring
6838          * before doing the reset.  This will prevent most unintended
6839          * resets.
6840          */
6841         if (!--tp->asf_counter) {
6842                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6843                         u32 val;
6844
6845                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6846                                       FWCMD_NICDRV_ALIVE3);
6847                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6848                         /* 5 second timeout */
6849                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6850                         val = tr32(GRC_RX_CPU_EVENT);
6851                         val |= (1 << 14);
6852                         tw32(GRC_RX_CPU_EVENT, val);
6853                 }
6854                 tp->asf_counter = tp->asf_multiplier;
6855         }
6856
6857         spin_unlock(&tp->lock);
6858
6859 restart_timer:
6860         tp->timer.expires = jiffies + tp->timer_offset;
6861         add_timer(&tp->timer);
6862 }
6863
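     /* Install the interrupt handler matching the current mode: the MSI
      * handler (one-shot variant when supported) for MSI, otherwise the
      * shared INTx handler, using the tagged-status flavour if the chip
      * provides tagged status.
      */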
6864 static int tg3_request_irq(struct tg3 *tp)
6865 {
6866         irq_handler_t fn;
6867         unsigned long flags;
6868         struct net_device *dev = tp->dev;
6869
6870         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6871                 fn = tg3_msi;
6872                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6873                         fn = tg3_msi_1shot;
6874                 flags = IRQF_SAMPLE_RANDOM;
6875         } else {
6876                 fn = tg3_interrupt;
6877                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6878                         fn = tg3_interrupt_tagged;
6879                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6880         }
6881         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6882 }
6883
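     /* Verify that the device can actually deliver an interrupt: temporarily
      * install tg3_test_isr, force a host-coalescing "now" event, and poll
      * the interrupt mailbox / MISC_HOST_CTRL for up to ~50ms before
      * restoring the normal handler.
      */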
6884 static int tg3_test_interrupt(struct tg3 *tp)
6885 {
6886         struct net_device *dev = tp->dev;
6887         int err, i, intr_ok = 0;
6888
6889         if (!netif_running(dev))
6890                 return -ENODEV;
6891
6892         tg3_disable_ints(tp);
6893
6894         free_irq(tp->pdev->irq, dev);
6895
6896         err = request_irq(tp->pdev->irq, tg3_test_isr,
6897                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6898         if (err)
6899                 return err;
6900
6901         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6902         tg3_enable_ints(tp);
6903
6904         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6905                HOSTCC_MODE_NOW);
6906
6907         for (i = 0; i < 5; i++) {
6908                 u32 int_mbox, misc_host_ctrl;
6909
6910                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6911                                         TG3_64BIT_REG_LOW);
6912                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
6913
6914                 if ((int_mbox != 0) ||
6915                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
6916                         intr_ok = 1;
6917                         break;
6918                 }
6919
6920                 msleep(10);
6921         }
6922
6923         tg3_disable_ints(tp);
6924
6925         free_irq(tp->pdev->irq, dev);
6926
6927         err = tg3_request_irq(tp);
6928
6929         if (err)
6930                 return err;
6931
6932         if (intr_ok)
6933                 return 0;
6934
6935         return -EIO;
6936 }
6937
6938 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6939  * INTx mode is successfully restored.
6940  */
6941 static int tg3_test_msi(struct tg3 *tp)
6942 {
6943         struct net_device *dev = tp->dev;
6944         int err;
6945         u16 pci_cmd;
6946
6947         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6948                 return 0;
6949
6950         /* Turn off SERR reporting in case MSI terminates with Master
6951          * Abort.
6952          */
6953         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6954         pci_write_config_word(tp->pdev, PCI_COMMAND,
6955                               pci_cmd & ~PCI_COMMAND_SERR);
6956
6957         err = tg3_test_interrupt(tp);
6958
6959         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6960
6961         if (!err)
6962                 return 0;
6963
6964         /* other failures */
6965         if (err != -EIO)
6966                 return err;
6967
6968         /* MSI test failed, go back to INTx mode */
6969         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6970                "switching to INTx mode. Please report this failure to "
6971                "the PCI maintainer and include system chipset information.\n",
6972                        tp->dev->name);
6973
6974         free_irq(tp->pdev->irq, dev);
6975         pci_disable_msi(tp->pdev);
6976
6977         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6978
6979         err = tg3_request_irq(tp);
6980         if (err)
6981                 return err;
6982
6983         /* Need to reset the chip because the MSI cycle may have terminated
6984          * with Master Abort.
6985          */
6986         tg3_full_lock(tp, 1);
6987
6988         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6989         err = tg3_init_hw(tp, 1);
6990
6991         tg3_full_unlock(tp);
6992
6993         if (err)
6994                 free_irq(tp->pdev->irq, dev);
6995
6996         return err;
6997 }
6998
6999 static int tg3_open(struct net_device *dev)
7000 {
7001         struct tg3 *tp = netdev_priv(dev);
7002         int err;
7003
7004         netif_carrier_off(tp->dev);
7005
7006         tg3_full_lock(tp, 0);
7007
7008         err = tg3_set_power_state(tp, PCI_D0);
7009         if (err) {
7010                 tg3_full_unlock(tp);
7011                 return err;
7012         }
7013
7014         tg3_disable_ints(tp);
7015         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7016
7017         tg3_full_unlock(tp);
7018
7019         /* The placement of this call is tied
7020          * to the setup and use of Host TX descriptors.
7021          */
7022         err = tg3_alloc_consistent(tp);
7023         if (err)
7024                 return err;
7025
7026         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7027             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
7028             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
7029             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
7030               (tp->pdev_peer == tp->pdev))) {
7031                 /* All MSI supporting chips should support tagged
7032                  * status.  Assert that this is the case.
7033                  */
7034                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7035                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7036                                "Not using MSI.\n", tp->dev->name);
7037                 } else if (pci_enable_msi(tp->pdev) == 0) {
7038                         u32 msi_mode;
7039
7040                         msi_mode = tr32(MSGINT_MODE);
7041                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7042                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7043                 }
7044         }
7045         err = tg3_request_irq(tp);
7046
7047         if (err) {
7048                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7049                         pci_disable_msi(tp->pdev);
7050                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7051                 }
7052                 tg3_free_consistent(tp);
7053                 return err;
7054         }
7055
7056         tg3_full_lock(tp, 0);
7057
7058         err = tg3_init_hw(tp, 1);
7059         if (err) {
7060                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7061                 tg3_free_rings(tp);
7062         } else {
7063                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7064                         tp->timer_offset = HZ;
7065                 else
7066                         tp->timer_offset = HZ / 10;
7067
7068                 BUG_ON(tp->timer_offset > HZ);
7069                 tp->timer_counter = tp->timer_multiplier =
7070                         (HZ / tp->timer_offset);
7071                 tp->asf_counter = tp->asf_multiplier =
7072                         ((HZ / tp->timer_offset) * 2);
7073
7074                 init_timer(&tp->timer);
7075                 tp->timer.expires = jiffies + tp->timer_offset;
7076                 tp->timer.data = (unsigned long) tp;
7077                 tp->timer.function = tg3_timer;
7078         }
7079
7080         tg3_full_unlock(tp);
7081
7082         if (err) {
7083                 free_irq(tp->pdev->irq, dev);
7084                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7085                         pci_disable_msi(tp->pdev);
7086                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7087                 }
7088                 tg3_free_consistent(tp);
7089                 return err;
7090         }
7091
7092         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7093                 err = tg3_test_msi(tp);
7094
7095                 if (err) {
7096                         tg3_full_lock(tp, 0);
7097
7098                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7099                                 pci_disable_msi(tp->pdev);
7100                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7101                         }
7102                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7103                         tg3_free_rings(tp);
7104                         tg3_free_consistent(tp);
7105
7106                         tg3_full_unlock(tp);
7107
7108                         return err;
7109                 }
7110
7111                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7112                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7113                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7114
7115                                 tw32(PCIE_TRANSACTION_CFG,
7116                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7117                         }
7118                 }
7119         }
7120
7121         tg3_full_lock(tp, 0);
7122
7123         add_timer(&tp->timer);
7124         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7125         tg3_enable_ints(tp);
7126
7127         tg3_full_unlock(tp);
7128
7129         netif_start_queue(dev);
7130
7131         return 0;
7132 }
7133
7134 #if 0
7135 /*static*/ void tg3_dump_state(struct tg3 *tp)
7136 {
7137         u32 val32, val32_2, val32_3, val32_4, val32_5;
7138         u16 val16;
7139         int i;
7140
7141         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7142         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7143         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7144                val16, val32);
7145
7146         /* MAC block */
7147         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7148                tr32(MAC_MODE), tr32(MAC_STATUS));
7149         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7150                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7151         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7152                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7153         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7154                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7155
7156         /* Send data initiator control block */
7157         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7158                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7159         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7160                tr32(SNDDATAI_STATSCTRL));
7161
7162         /* Send data completion control block */
7163         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7164
7165         /* Send BD ring selector block */
7166         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7167                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7168
7169         /* Send BD initiator control block */
7170         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7171                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7172
7173         /* Send BD completion control block */
7174         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7175
7176         /* Receive list placement control block */
7177         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7178                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7179         printk("       RCVLPC_STATSCTRL[%08x]\n",
7180                tr32(RCVLPC_STATSCTRL));
7181
7182         /* Receive data and receive BD initiator control block */
7183         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7184                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7185
7186         /* Receive data completion control block */
7187         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7188                tr32(RCVDCC_MODE));
7189
7190         /* Receive BD initiator control block */
7191         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7192                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7193
7194         /* Receive BD completion control block */
7195         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7196                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7197
7198         /* Receive list selector control block */
7199         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7200                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7201
7202         /* Mbuf cluster free block */
7203         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7204                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7205
7206         /* Host coalescing control block */
7207         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7208                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7209         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7210                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7211                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7212         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7213                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7214                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7215         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7216                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7217         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7218                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7219
7220         /* Memory arbiter control block */
7221         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7222                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7223
7224         /* Buffer manager control block */
7225         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7226                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7227         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7228                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7229         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7230                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7231                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7232                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7233
7234         /* Read DMA control block */
7235         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7236                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7237
7238         /* Write DMA control block */
7239         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7240                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7241
7242         /* DMA completion block */
7243         printk("DEBUG: DMAC_MODE[%08x]\n",
7244                tr32(DMAC_MODE));
7245
7246         /* GRC block */
7247         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7248                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7249         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7250                tr32(GRC_LOCAL_CTRL));
7251
7252         /* TG3_BDINFOs */
7253         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7254                tr32(RCVDBDI_JUMBO_BD + 0x0),
7255                tr32(RCVDBDI_JUMBO_BD + 0x4),
7256                tr32(RCVDBDI_JUMBO_BD + 0x8),
7257                tr32(RCVDBDI_JUMBO_BD + 0xc));
7258         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7259                tr32(RCVDBDI_STD_BD + 0x0),
7260                tr32(RCVDBDI_STD_BD + 0x4),
7261                tr32(RCVDBDI_STD_BD + 0x8),
7262                tr32(RCVDBDI_STD_BD + 0xc));
7263         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7264                tr32(RCVDBDI_MINI_BD + 0x0),
7265                tr32(RCVDBDI_MINI_BD + 0x4),
7266                tr32(RCVDBDI_MINI_BD + 0x8),
7267                tr32(RCVDBDI_MINI_BD + 0xc));
7268
7269         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7270         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7271         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7272         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7273         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7274                val32, val32_2, val32_3, val32_4);
7275
7276         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7277         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7278         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7279         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7280         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7281                val32, val32_2, val32_3, val32_4);
7282
7283         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7284         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7285         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7286         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7287         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7288         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7289                val32, val32_2, val32_3, val32_4, val32_5);
7290
7291         /* SW status block */
7292         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7293                tp->hw_status->status,
7294                tp->hw_status->status_tag,
7295                tp->hw_status->rx_jumbo_consumer,
7296                tp->hw_status->rx_consumer,
7297                tp->hw_status->rx_mini_consumer,
7298                tp->hw_status->idx[0].rx_producer,
7299                tp->hw_status->idx[0].tx_consumer);
7300
7301         /* SW statistics block */
7302         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7303                ((u32 *)tp->hw_stats)[0],
7304                ((u32 *)tp->hw_stats)[1],
7305                ((u32 *)tp->hw_stats)[2],
7306                ((u32 *)tp->hw_stats)[3]);
7307
7308         /* Mailboxes */
7309         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7310                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7311                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7312                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7313                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7314
7315         /* NIC side send descriptors. */
7316         for (i = 0; i < 6; i++) {
7317                 unsigned long txd;
7318
7319                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7320                         + (i * sizeof(struct tg3_tx_buffer_desc));
7321                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7322                        i,
7323                        readl(txd + 0x0), readl(txd + 0x4),
7324                        readl(txd + 0x8), readl(txd + 0xc));
7325         }
7326
7327         /* NIC side RX descriptors. */
7328         for (i = 0; i < 6; i++) {
7329                 unsigned long rxd;
7330
7331                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7332                         + (i * sizeof(struct tg3_rx_buffer_desc));
7333                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7334                        i,
7335                        readl(rxd + 0x0), readl(rxd + 0x4),
7336                        readl(rxd + 0x8), readl(rxd + 0xc));
7337                 rxd += (4 * sizeof(u32));
7338                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7339                        i,
7340                        readl(rxd + 0x0), readl(rxd + 0x4),
7341                        readl(rxd + 0x8), readl(rxd + 0xc));
7342         }
7343
7344         for (i = 0; i < 6; i++) {
7345                 unsigned long rxd;
7346
7347                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7348                         + (i * sizeof(struct tg3_rx_buffer_desc));
7349                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7350                        i,
7351                        readl(rxd + 0x0), readl(rxd + 0x4),
7352                        readl(rxd + 0x8), readl(rxd + 0xc));
7353                 rxd += (4 * sizeof(u32));
7354                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7355                        i,
7356                        readl(rxd + 0x0), readl(rxd + 0x4),
7357                        readl(rxd + 0x8), readl(rxd + 0xc));
7358         }
7359 }
7360 #endif
7361
7362 static struct net_device_stats *tg3_get_stats(struct net_device *);
7363 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7364
7365 static int tg3_close(struct net_device *dev)
7366 {
7367         struct tg3 *tp = netdev_priv(dev);
7368
7369         /* Calling flush_scheduled_work() may deadlock because
7370          * linkwatch_event() may be on the workqueue and it will try to get
7371          * the rtnl_lock, which we are holding.
7372          */
7373         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7374                 msleep(1);
7375
7376         netif_stop_queue(dev);
7377
7378         del_timer_sync(&tp->timer);
7379
7380         tg3_full_lock(tp, 1);
7381 #if 0
7382         tg3_dump_state(tp);
7383 #endif
7384
7385         tg3_disable_ints(tp);
7386
7387         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7388         tg3_free_rings(tp);
7389         tp->tg3_flags &=
7390                 ~(TG3_FLAG_INIT_COMPLETE |
7391                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7392
7393         tg3_full_unlock(tp);
7394
7395         free_irq(tp->pdev->irq, dev);
7396         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7397                 pci_disable_msi(tp->pdev);
7398                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7399         }
7400
7401         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7402                sizeof(tp->net_stats_prev));
7403         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7404                sizeof(tp->estats_prev));
7405
7406         tg3_free_consistent(tp);
7407
7408         tg3_set_power_state(tp, PCI_D3hot);
7409
7410         netif_carrier_off(tp->dev);
7411
7412         return 0;
7413 }
7414
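     /* Fold a 64-bit hardware stat into an unsigned long; 32-bit hosts can
      * only return the low word.
      */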
7415 static inline unsigned long get_stat64(tg3_stat64_t *val)
7416 {
7417         unsigned long ret;
7418
7419 #if (BITS_PER_LONG == 32)
7420         ret = val->low;
7421 #else
7422         ret = ((u64)val->high << 32) | ((u64)val->low);
7423 #endif
7424         return ret;
7425 }
7426
7427 static unsigned long calc_crc_errors(struct tg3 *tp)
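     /* On 5700/5701 with a copper PHY the CRC error count lives in the PHY
      * (read via registers 0x1e/0x14, as in tg3_reset_hw()) and is
      * accumulated in tp->phy_crc_errors; everything else uses the MAC
      * rx_fcs_errors counter.
      */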
7428 {
7429         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7430
7431         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7432             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7433              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7434                 u32 val;
7435
7436                 spin_lock_bh(&tp->lock);
7437                 if (!tg3_readphy(tp, 0x1e, &val)) {
7438                         tg3_writephy(tp, 0x1e, val | 0x8000);
7439                         tg3_readphy(tp, 0x14, &val);
7440                 } else
7441                         val = 0;
7442                 spin_unlock_bh(&tp->lock);
7443
7444                 tp->phy_crc_errors += val;
7445
7446                 return tp->phy_crc_errors;
7447         }
7448
7449         return get_stat64(&hw_stats->rx_fcs_errors);
7450 }
7451
7452 #define ESTAT_ADD(member) \
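     /* Each ethtool stat is the snapshot saved at the last close
      * (tp->estats_prev, see tg3_close()) plus the live hardware counter,
      * so totals survive an interface down/up cycle.
      */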
7453         estats->member =        old_estats->member + \
7454                                 get_stat64(&hw_stats->member)
7455
7456 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7457 {
7458         struct tg3_ethtool_stats *estats = &tp->estats;
7459         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7460         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7461
7462         if (!hw_stats)
7463                 return old_estats;
7464
7465         ESTAT_ADD(rx_octets);
7466         ESTAT_ADD(rx_fragments);
7467         ESTAT_ADD(rx_ucast_packets);
7468         ESTAT_ADD(rx_mcast_packets);
7469         ESTAT_ADD(rx_bcast_packets);
7470         ESTAT_ADD(rx_fcs_errors);
7471         ESTAT_ADD(rx_align_errors);
7472         ESTAT_ADD(rx_xon_pause_rcvd);
7473         ESTAT_ADD(rx_xoff_pause_rcvd);
7474         ESTAT_ADD(rx_mac_ctrl_rcvd);
7475         ESTAT_ADD(rx_xoff_entered);
7476         ESTAT_ADD(rx_frame_too_long_errors);
7477         ESTAT_ADD(rx_jabbers);
7478         ESTAT_ADD(rx_undersize_packets);
7479         ESTAT_ADD(rx_in_length_errors);
7480         ESTAT_ADD(rx_out_length_errors);
7481         ESTAT_ADD(rx_64_or_less_octet_packets);
7482         ESTAT_ADD(rx_65_to_127_octet_packets);
7483         ESTAT_ADD(rx_128_to_255_octet_packets);
7484         ESTAT_ADD(rx_256_to_511_octet_packets);
7485         ESTAT_ADD(rx_512_to_1023_octet_packets);
7486         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7487         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7488         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7489         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7490         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7491
7492         ESTAT_ADD(tx_octets);
7493         ESTAT_ADD(tx_collisions);
7494         ESTAT_ADD(tx_xon_sent);
7495         ESTAT_ADD(tx_xoff_sent);
7496         ESTAT_ADD(tx_flow_control);
7497         ESTAT_ADD(tx_mac_errors);
7498         ESTAT_ADD(tx_single_collisions);
7499         ESTAT_ADD(tx_mult_collisions);
7500         ESTAT_ADD(tx_deferred);
7501         ESTAT_ADD(tx_excessive_collisions);
7502         ESTAT_ADD(tx_late_collisions);
7503         ESTAT_ADD(tx_collide_2times);
7504         ESTAT_ADD(tx_collide_3times);
7505         ESTAT_ADD(tx_collide_4times);
7506         ESTAT_ADD(tx_collide_5times);
7507         ESTAT_ADD(tx_collide_6times);
7508         ESTAT_ADD(tx_collide_7times);
7509         ESTAT_ADD(tx_collide_8times);
7510         ESTAT_ADD(tx_collide_9times);
7511         ESTAT_ADD(tx_collide_10times);
7512         ESTAT_ADD(tx_collide_11times);
7513         ESTAT_ADD(tx_collide_12times);
7514         ESTAT_ADD(tx_collide_13times);
7515         ESTAT_ADD(tx_collide_14times);
7516         ESTAT_ADD(tx_collide_15times);
7517         ESTAT_ADD(tx_ucast_packets);
7518         ESTAT_ADD(tx_mcast_packets);
7519         ESTAT_ADD(tx_bcast_packets);
7520         ESTAT_ADD(tx_carrier_sense_errors);
7521         ESTAT_ADD(tx_discards);
7522         ESTAT_ADD(tx_errors);
7523
7524         ESTAT_ADD(dma_writeq_full);
7525         ESTAT_ADD(dma_write_prioq_full);
7526         ESTAT_ADD(rxbds_empty);
7527         ESTAT_ADD(rx_discards);
7528         ESTAT_ADD(rx_errors);
7529         ESTAT_ADD(rx_threshold_hit);
7530
7531         ESTAT_ADD(dma_readq_full);
7532         ESTAT_ADD(dma_read_prioq_full);
7533         ESTAT_ADD(tx_comp_queue_full);
7534
7535         ESTAT_ADD(ring_set_send_prod_index);
7536         ESTAT_ADD(ring_status_update);
7537         ESTAT_ADD(nic_irqs);
7538         ESTAT_ADD(nic_avoided_irqs);
7539         ESTAT_ADD(nic_tx_threshold_hit);
7540
7541         return estats;
7542 }
7543
7544 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7545 {
7546         struct tg3 *tp = netdev_priv(dev);
7547         struct net_device_stats *stats = &tp->net_stats;
7548         struct net_device_stats *old_stats = &tp->net_stats_prev;
7549         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7550
7551         if (!hw_stats)
7552                 return old_stats;
7553
7554         stats->rx_packets = old_stats->rx_packets +
7555                 get_stat64(&hw_stats->rx_ucast_packets) +
7556                 get_stat64(&hw_stats->rx_mcast_packets) +
7557                 get_stat64(&hw_stats->rx_bcast_packets);
7558
7559         stats->tx_packets = old_stats->tx_packets +
7560                 get_stat64(&hw_stats->tx_ucast_packets) +
7561                 get_stat64(&hw_stats->tx_mcast_packets) +
7562                 get_stat64(&hw_stats->tx_bcast_packets);
7563
7564         stats->rx_bytes = old_stats->rx_bytes +
7565                 get_stat64(&hw_stats->rx_octets);
7566         stats->tx_bytes = old_stats->tx_bytes +
7567                 get_stat64(&hw_stats->tx_octets);
7568
7569         stats->rx_errors = old_stats->rx_errors +
7570                 get_stat64(&hw_stats->rx_errors);
7571         stats->tx_errors = old_stats->tx_errors +
7572                 get_stat64(&hw_stats->tx_errors) +
7573                 get_stat64(&hw_stats->tx_mac_errors) +
7574                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7575                 get_stat64(&hw_stats->tx_discards);
7576
7577         stats->multicast = old_stats->multicast +
7578                 get_stat64(&hw_stats->rx_mcast_packets);
7579         stats->collisions = old_stats->collisions +
7580                 get_stat64(&hw_stats->tx_collisions);
7581
7582         stats->rx_length_errors = old_stats->rx_length_errors +
7583                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7584                 get_stat64(&hw_stats->rx_undersize_packets);
7585
7586         stats->rx_over_errors = old_stats->rx_over_errors +
7587                 get_stat64(&hw_stats->rxbds_empty);
7588         stats->rx_frame_errors = old_stats->rx_frame_errors +
7589                 get_stat64(&hw_stats->rx_align_errors);
7590         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7591                 get_stat64(&hw_stats->tx_discards);
7592         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7593                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7594
7595         stats->rx_crc_errors = old_stats->rx_crc_errors +
7596                 calc_crc_errors(tp);
7597
7598         stats->rx_missed_errors = old_stats->rx_missed_errors +
7599                 get_stat64(&hw_stats->rx_discards);
7600
7601         return stats;
7602 }
7603
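     /* Bitwise (reflected) CRC-32, polynomial 0xedb88320; used by
      * __tg3_set_rx_mode() below to hash multicast addresses into the
      * MAC hash filter.
      */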
7604 static inline u32 calc_crc(unsigned char *buf, int len)
7605 {
7606         u32 reg;
7607         u32 tmp;
7608         int j, k;
7609
7610         reg = 0xffffffff;
7611
7612         for (j = 0; j < len; j++) {
7613                 reg ^= buf[j];
7614
7615                 for (k = 0; k < 8; k++) {
7616                         tmp = reg & 0x01;
7617
7618                         reg >>= 1;
7619
7620                         if (tmp) {
7621                                 reg ^= 0xedb88320;
7622                         }
7623                 }
7624         }
7625
7626         return ~reg;
7627 }
7628
7629 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7630 {
7631         /* accept or reject all multicast frames */
7632         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7633         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7634         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7635         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7636 }
7637
7638 static void __tg3_set_rx_mode(struct net_device *dev)
7639 {
7640         struct tg3 *tp = netdev_priv(dev);
7641         u32 rx_mode;
7642
7643         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7644                                   RX_MODE_KEEP_VLAN_TAG);
7645
7646         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7647          * flag clear.
7648          */
7649 #if TG3_VLAN_TAG_USED
7650         if (!tp->vlgrp &&
7651             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7652                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7653 #else
7654         /* By definition, VLAN is always disabled in this
7655          * case.
7656          */
7657         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7658                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7659 #endif
7660
7661         if (dev->flags & IFF_PROMISC) {
7662                 /* Promiscuous mode. */
7663                 rx_mode |= RX_MODE_PROMISC;
7664         } else if (dev->flags & IFF_ALLMULTI) {
7665                 /* Accept all multicast. */
7666                 tg3_set_multi(tp, 1);
7667         } else if (dev->mc_count < 1) {
7668                 /* Reject all multicast. */
7669                 tg3_set_multi(tp, 0);
7670         } else {
7671                 /* Accept one or more multicast(s). */
7672                 struct dev_mc_list *mclist;
7673                 unsigned int i;
7674                 u32 mc_filter[4] = { 0, };
7675                 u32 regidx;
7676                 u32 bit;
7677                 u32 crc;
7678
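                /* Hash on the low 7 bits of the inverted CRC: bits 6:5 pick
                 * one of the four MAC_HASH registers, bits 4:0 pick the bit
                 * within it.
                 */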
7679                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7680                      i++, mclist = mclist->next) {
7681
7682                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7683                         bit = ~crc & 0x7f;
7684                         regidx = (bit & 0x60) >> 5;
7685                         bit &= 0x1f;
7686                         mc_filter[regidx] |= (1 << bit);
7687                 }
7688
7689                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7690                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7691                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7692                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7693         }
7694
7695         if (rx_mode != tp->rx_mode) {
7696                 tp->rx_mode = rx_mode;
7697                 tw32_f(MAC_RX_MODE, rx_mode);
7698                 udelay(10);
7699         }
7700 }
7701
7702 static void tg3_set_rx_mode(struct net_device *dev)
7703 {
7704         struct tg3 *tp = netdev_priv(dev);
7705
7706         if (!netif_running(dev))
7707                 return;
7708
7709         tg3_full_lock(tp, 0);
7710         __tg3_set_rx_mode(dev);
7711         tg3_full_unlock(tp);
7712 }
7713
7714 #define TG3_REGDUMP_LEN         (32 * 1024)
7715
7716 static int tg3_get_regs_len(struct net_device *dev)
7717 {
7718         return TG3_REGDUMP_LEN;
7719 }
7720
7721 static void tg3_get_regs(struct net_device *dev,
7722                 struct ethtool_regs *regs, void *_p)
7723 {
7724         u32 *p = _p;
7725         struct tg3 *tp = netdev_priv(dev);
7726         u8 *orig_p = _p;
7727         int i;
7728
7729         regs->version = 0;
7730
7731         memset(p, 0, TG3_REGDUMP_LEN);
7732
7733         if (tp->link_config.phy_is_low_power)
7734                 return;
7735
7736         tg3_full_lock(tp, 0);
7737
7738 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7739 #define GET_REG32_LOOP(base,len)                \
7740 do {    p = (u32 *)(orig_p + (base));           \
7741         for (i = 0; i < len; i += 4)            \
7742                 __GET_REG32((base) + i);        \
7743 } while (0)
7744 #define GET_REG32_1(reg)                        \
7745 do {    p = (u32 *)(orig_p + (reg));            \
7746         __GET_REG32((reg));                     \
7747 } while (0)
7748
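        /* Each block below is copied at the same offset it occupies in the
         * register map, so unread holes in the dump stay zero-filled.
         */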
7749         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7750         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7751         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7752         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7753         GET_REG32_1(SNDDATAC_MODE);
7754         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7755         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7756         GET_REG32_1(SNDBDC_MODE);
7757         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7758         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7759         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7760         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7761         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7762         GET_REG32_1(RCVDCC_MODE);
7763         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7764         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7765         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7766         GET_REG32_1(MBFREE_MODE);
7767         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7768         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7769         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7770         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7771         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7772         GET_REG32_1(RX_CPU_MODE);
7773         GET_REG32_1(RX_CPU_STATE);
7774         GET_REG32_1(RX_CPU_PGMCTR);
7775         GET_REG32_1(RX_CPU_HWBKPT);
7776         GET_REG32_1(TX_CPU_MODE);
7777         GET_REG32_1(TX_CPU_STATE);
7778         GET_REG32_1(TX_CPU_PGMCTR);
7779         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7780         GET_REG32_LOOP(FTQ_RESET, 0x120);
7781         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7782         GET_REG32_1(DMAC_MODE);
7783         GET_REG32_LOOP(GRC_MODE, 0x4c);
7784         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7785                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7786
7787 #undef __GET_REG32
7788 #undef GET_REG32_LOOP
7789 #undef GET_REG32_1
7790
7791         tg3_full_unlock(tp);
7792 }
7793
7794 static int tg3_get_eeprom_len(struct net_device *dev)
7795 {
7796         struct tg3 *tp = netdev_priv(dev);
7797
7798         return tp->nvram_size;
7799 }
7800
7801 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7802 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7803
7804 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7805 {
7806         struct tg3 *tp = netdev_priv(dev);
7807         int ret;
7808         u8  *pd;
7809         u32 i, offset, len, val, b_offset, b_count;
7810
7811         if (tp->link_config.phy_is_low_power)
7812                 return -EAGAIN;
7813
7814         offset = eeprom->offset;
7815         len = eeprom->len;
7816         eeprom->len = 0;
7817
7818         eeprom->magic = TG3_EEPROM_MAGIC;
7819
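        /* NVRAM is read a 32-bit word at a time, so handle a misaligned
         * head, the aligned middle, and a misaligned tail separately.
         */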
7820         if (offset & 3) {
7821                 /* adjustments to start on required 4 byte boundary */
7822                 b_offset = offset & 3;
7823                 b_count = 4 - b_offset;
7824                 if (b_count > len) {
7825                         /* i.e. offset=1 len=2 */
7826                         b_count = len;
7827                 }
7828                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7829                 if (ret)
7830                         return ret;
7831                 val = cpu_to_le32(val);
7832                 memcpy(data, ((char*)&val) + b_offset, b_count);
7833                 len -= b_count;
7834                 offset += b_count;
7835                 eeprom->len += b_count;
7836         }
7837
7838         /* read bytes up to the last 4-byte boundary */
7839         pd = &data[eeprom->len];
7840         for (i = 0; i < (len - (len & 3)); i += 4) {
7841                 ret = tg3_nvram_read(tp, offset + i, &val);
7842                 if (ret) {
7843                         eeprom->len += i;
7844                         return ret;
7845                 }
7846                 val = cpu_to_le32(val);
7847                 memcpy(pd + i, &val, 4);
7848         }
7849         eeprom->len += i;
7850
7851         if (len & 3) {
7852                 /* read last bytes not ending on 4 byte boundary */
7853                 pd = &data[eeprom->len];
7854                 b_count = len & 3;
7855                 b_offset = offset + len - b_count;
7856                 ret = tg3_nvram_read(tp, b_offset, &val);
7857                 if (ret)
7858                         return ret;
7859                 val = cpu_to_le32(val);
7860                 memcpy(pd, ((char*)&val), b_count);
7861                 eeprom->len += b_count;
7862         }
7863         return 0;
7864 }
7865
7866 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7867
7868 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7869 {
7870         struct tg3 *tp = netdev_priv(dev);
7871         int ret;
7872         u32 offset, len, b_offset, odd_len, start, end;
7873         u8 *buf;
7874
7875         if (tp->link_config.phy_is_low_power)
7876                 return -EAGAIN;
7877
7878         if (eeprom->magic != TG3_EEPROM_MAGIC)
7879                 return -EINVAL;
7880
7881         offset = eeprom->offset;
7882         len = eeprom->len;
7883
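        /* NVRAM writes must be word aligned: widen the request to 4-byte
         * boundaries and merge in the existing head/tail words so the
         * untouched bytes are preserved.
         */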
7884         if ((b_offset = (offset & 3))) {
7885                 /* adjustments to start on required 4 byte boundary */
7886                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7887                 if (ret)
7888                         return ret;
7889                 start = cpu_to_le32(start);
7890                 len += b_offset;
7891                 offset &= ~3;
7892                 if (len < 4)
7893                         len = 4;
7894         }
7895
7896         odd_len = 0;
7897         if (len & 3) {
7898                 /* adjustments to end on required 4 byte boundary */
7899                 odd_len = 1;
7900                 len = (len + 3) & ~3;
7901                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7902                 if (ret)
7903                         return ret;
7904                 end = cpu_to_le32(end);
7905         }
7906
7907         buf = data;
7908         if (b_offset || odd_len) {
7909                 buf = kmalloc(len, GFP_KERNEL);
7910                 if (!buf)
7911                         return -ENOMEM;
7912                 if (b_offset)
7913                         memcpy(buf, &start, 4);
7914                 if (odd_len)
7915                         memcpy(buf+len-4, &end, 4);
7916                 memcpy(buf + b_offset, data, eeprom->len);
7917         }
7918
7919         ret = tg3_nvram_write_block(tp, offset, len, buf);
7920
7921         if (buf != data)
7922                 kfree(buf);
7923
7924         return ret;
7925 }
7926
7927 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7928 {
7929         struct tg3 *tp = netdev_priv(dev);
7930
7931         cmd->supported = (SUPPORTED_Autoneg);
7932
7933         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7934                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7935                                    SUPPORTED_1000baseT_Full);
7936
7937         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7938                 cmd->supported |= (SUPPORTED_100baseT_Half |
7939                                   SUPPORTED_100baseT_Full |
7940                                   SUPPORTED_10baseT_Half |
7941                                   SUPPORTED_10baseT_Full |
7942                                   SUPPORTED_MII);
7943                 cmd->port = PORT_TP;
7944         } else {
7945                 cmd->supported |= SUPPORTED_FIBRE;
7946                 cmd->port = PORT_FIBRE;
7947         }
7948
7949         cmd->advertising = tp->link_config.advertising;
7950         if (netif_running(dev)) {
7951                 cmd->speed = tp->link_config.active_speed;
7952                 cmd->duplex = tp->link_config.active_duplex;
7953         }
7954         cmd->phy_address = PHY_ADDR;
7955         cmd->transceiver = 0;
7956         cmd->autoneg = tp->link_config.autoneg;
7957         cmd->maxtxpkt = 0;
7958         cmd->maxrxpkt = 0;
7959         return 0;
7960 }
7961
7962 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7963 {
7964         struct tg3 *tp = netdev_priv(dev);
7965
7966         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7967                 /* These are the only advertisement bits allowed.  */
7968                 if (cmd->autoneg == AUTONEG_ENABLE &&
7969                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7970                                           ADVERTISED_1000baseT_Full |
7971                                           ADVERTISED_Autoneg |
7972                                           ADVERTISED_FIBRE)))
7973                         return -EINVAL;
7974                 /* Fiber can only do SPEED_1000.  */
7975                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7976                          (cmd->speed != SPEED_1000))
7977                         return -EINVAL;
7978         /* Copper cannot force SPEED_1000.  */
7979         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7980                    (cmd->speed == SPEED_1000))
7981                 return -EINVAL;
7982         else if ((cmd->speed == SPEED_1000) &&
7983                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7984                 return -EINVAL;
7985
7986         tg3_full_lock(tp, 0);
7987
7988         tp->link_config.autoneg = cmd->autoneg;
7989         if (cmd->autoneg == AUTONEG_ENABLE) {
7990                 tp->link_config.advertising = cmd->advertising;
7991                 tp->link_config.speed = SPEED_INVALID;
7992                 tp->link_config.duplex = DUPLEX_INVALID;
7993         } else {
7994                 tp->link_config.advertising = 0;
7995                 tp->link_config.speed = cmd->speed;
7996                 tp->link_config.duplex = cmd->duplex;
7997         }
7998
7999         tp->link_config.orig_speed = tp->link_config.speed;
8000         tp->link_config.orig_duplex = tp->link_config.duplex;
8001         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8002
8003         if (netif_running(dev))
8004                 tg3_setup_phy(tp, 1);
8005
8006         tg3_full_unlock(tp);
8007
8008         return 0;
8009 }
8010
8011 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8012 {
8013         struct tg3 *tp = netdev_priv(dev);
8014
8015         strcpy(info->driver, DRV_MODULE_NAME);
8016         strcpy(info->version, DRV_MODULE_VERSION);
8017         strcpy(info->fw_version, tp->fw_ver);
8018         strcpy(info->bus_info, pci_name(tp->pdev));
8019 }
8020
8021 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8022 {
8023         struct tg3 *tp = netdev_priv(dev);
8024
8025         wol->supported = WAKE_MAGIC;
8026         wol->wolopts = 0;
8027         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8028                 wol->wolopts = WAKE_MAGIC;
8029         memset(&wol->sopass, 0, sizeof(wol->sopass));
8030 }
8031
8032 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8033 {
8034         struct tg3 *tp = netdev_priv(dev);
8035
8036         if (wol->wolopts & ~WAKE_MAGIC)
8037                 return -EINVAL;
8038         if ((wol->wolopts & WAKE_MAGIC) &&
8039             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
8040             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
8041                 return -EINVAL;
8042
8043         spin_lock_bh(&tp->lock);
8044         if (wol->wolopts & WAKE_MAGIC)
8045                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8046         else
8047                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8048         spin_unlock_bh(&tp->lock);
8049
8050         return 0;
8051 }
8052
8053 static u32 tg3_get_msglevel(struct net_device *dev)
8054 {
8055         struct tg3 *tp = netdev_priv(dev);
8056         return tp->msg_enable;
8057 }
8058
8059 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8060 {
8061         struct tg3 *tp = netdev_priv(dev);
8062         tp->msg_enable = value;
8063 }
8064
8065 #if TG3_TSO_SUPPORT != 0
8066 static int tg3_set_tso(struct net_device *dev, u32 value)
8067 {
8068         struct tg3 *tp = netdev_priv(dev);
8069
8070         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8071                 if (value)
8072                         return -EINVAL;
8073                 return 0;
8074         }
8075         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8076             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8077                 if (value)
8078                         dev->features |= NETIF_F_TSO6;
8079                 else
8080                         dev->features &= ~NETIF_F_TSO6;
8081         }
8082         return ethtool_op_set_tso(dev, value);
8083 }
8084 #endif
8085
8086 static int tg3_nway_reset(struct net_device *dev)
8087 {
8088         struct tg3 *tp = netdev_priv(dev);
8089         u32 bmcr;
8090         int r;
8091
8092         if (!netif_running(dev))
8093                 return -EAGAIN;
8094
8095         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8096                 return -EINVAL;
8097
8098         spin_lock_bh(&tp->lock);
8099         r = -EINVAL;
8100         tg3_readphy(tp, MII_BMCR, &bmcr);
8101         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8102             ((bmcr & BMCR_ANENABLE) ||
8103              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8104                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8105                                            BMCR_ANENABLE);
8106                 r = 0;
8107         }
8108         spin_unlock_bh(&tp->lock);
8109
8110         return r;
8111 }
8112
8113 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8114 {
8115         struct tg3 *tp = netdev_priv(dev);
8116
8117         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8118         ering->rx_mini_max_pending = 0;
8119         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8120                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8121         else
8122                 ering->rx_jumbo_max_pending = 0;
8123
8124         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8125
8126         ering->rx_pending = tp->rx_pending;
8127         ering->rx_mini_pending = 0;
8128         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8129                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8130         else
8131                 ering->rx_jumbo_pending = 0;
8132
8133         ering->tx_pending = tp->tx_pending;
8134 }
8135
8136 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8137 {
8138         struct tg3 *tp = netdev_priv(dev);
8139         int irq_sync = 0, err = 0;
8140
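        /* Pending counts may not exceed the fixed ring sizes, and the tx
         * ring must leave room for at least one maximally fragmented skb
         * (three times that headroom on chips with the HW TSO bug).
         */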
8141         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8142             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8143             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8144             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8145             ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
8146              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8147                 return -EINVAL;
8148
8149         if (netif_running(dev)) {
8150                 tg3_netif_stop(tp);
8151                 irq_sync = 1;
8152         }
8153
8154         tg3_full_lock(tp, irq_sync);
8155
8156         tp->rx_pending = ering->rx_pending;
8157
8158         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8159             tp->rx_pending > 63)
8160                 tp->rx_pending = 63;
8161         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8162         tp->tx_pending = ering->tx_pending;
8163
8164         if (netif_running(dev)) {
8165                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8166                 err = tg3_restart_hw(tp, 1);
8167                 if (!err)
8168                         tg3_netif_start(tp);
8169         }
8170
8171         tg3_full_unlock(tp);
8172
8173         return err;
8174 }
8175
8176 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8177 {
8178         struct tg3 *tp = netdev_priv(dev);
8179
8180         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8181         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8182         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8183 }
8184
8185 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8186 {
8187         struct tg3 *tp = netdev_priv(dev);
8188         int irq_sync = 0, err = 0;
8189
8190         if (netif_running(dev)) {
8191                 tg3_netif_stop(tp);
8192                 irq_sync = 1;
8193         }
8194
8195         tg3_full_lock(tp, irq_sync);
8196
8197         if (epause->autoneg)
8198                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8199         else
8200                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8201         if (epause->rx_pause)
8202                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8203         else
8204                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8205         if (epause->tx_pause)
8206                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8207         else
8208                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8209
8210         if (netif_running(dev)) {
8211                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8212                 err = tg3_restart_hw(tp, 1);
8213                 if (!err)
8214                         tg3_netif_start(tp);
8215         }
8216
8217         tg3_full_unlock(tp);
8218
8219         return err;
8220 }
8221
8222 static u32 tg3_get_rx_csum(struct net_device *dev)
8223 {
8224         struct tg3 *tp = netdev_priv(dev);
8225         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8226 }
8227
8228 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8229 {
8230         struct tg3 *tp = netdev_priv(dev);
8231
8232         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8233                 if (data != 0)
8234                         return -EINVAL;
8235                 return 0;
8236         }
8237
8238         spin_lock_bh(&tp->lock);
8239         if (data)
8240                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8241         else
8242                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8243         spin_unlock_bh(&tp->lock);
8244
8245         return 0;
8246 }
8247
8248 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8249 {
8250         struct tg3 *tp = netdev_priv(dev);
8251
8252         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8253                 if (data != 0)
8254                         return -EINVAL;
8255                 return 0;
8256         }
8257
8258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8260                 ethtool_op_set_tx_hw_csum(dev, data);
8261         else
8262                 ethtool_op_set_tx_csum(dev, data);
8263
8264         return 0;
8265 }
8266
8267 static int tg3_get_stats_count(struct net_device *dev)
8268 {
8269         return TG3_NUM_STATS;
8270 }
8271
8272 static int tg3_get_test_count(struct net_device *dev)
8273 {
8274         return TG3_NUM_TEST;
8275 }
8276
8277 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8278 {
8279         switch (stringset) {
8280         case ETH_SS_STATS:
8281                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8282                 break;
8283         case ETH_SS_TEST:
8284                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8285                 break;
8286         default:
8287                 WARN_ON(1);     /* we need a WARN() */
8288                 break;
8289         }
8290 }
8291
8292 static int tg3_phys_id(struct net_device *dev, u32 data)
8293 {
8294         struct tg3 *tp = netdev_priv(dev);
8295         int i;
8296
8297         if (!netif_running(tp->dev))
8298                 return -EAGAIN;
8299
8300         if (data == 0)
8301                 data = 2;
8302
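        /* Blink the LEDs for 'data' seconds, toggling every 500 ms, then
         * restore the original LED control setting.
         */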
8303         for (i = 0; i < (data * 2); i++) {
8304                 if ((i % 2) == 0)
8305                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8306                                            LED_CTRL_1000MBPS_ON |
8307                                            LED_CTRL_100MBPS_ON |
8308                                            LED_CTRL_10MBPS_ON |
8309                                            LED_CTRL_TRAFFIC_OVERRIDE |
8310                                            LED_CTRL_TRAFFIC_BLINK |
8311                                            LED_CTRL_TRAFFIC_LED);
8312
8313                 else
8314                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8315                                            LED_CTRL_TRAFFIC_OVERRIDE);
8316
8317                 if (msleep_interruptible(500))
8318                         break;
8319         }
8320         tw32(MAC_LED_CTRL, tp->led_ctrl);
8321         return 0;
8322 }
8323
8324 static void tg3_get_ethtool_stats(struct net_device *dev,
8325                                   struct ethtool_stats *estats, u64 *tmp_stats)
8326 {
8327         struct tg3 *tp = netdev_priv(dev);
8328         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8329 }
8330
8331 #define NVRAM_TEST_SIZE 0x100
8332 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8333 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8334 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8335
8336 static int tg3_test_nvram(struct tg3 *tp)
8337 {
8338         u32 *buf, csum, magic;
8339         int i, j, err = 0, size;
8340
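        /* The NVRAM format is identified by its magic word: legacy images
         * carry CRC-protected bootstrap and manufacturing blocks, self-boot
         * format 1 images use a simple byte checksum, and the self-boot HW
         * format stores parity bits alongside the data.
         */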
8341         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8342                 return -EIO;
8343
8344         if (magic == TG3_EEPROM_MAGIC)
8345                 size = NVRAM_TEST_SIZE;
8346         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8347                 if ((magic & 0xe00000) == 0x200000)
8348                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8349                 else
8350                         return 0;
8351         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8352                 size = NVRAM_SELFBOOT_HW_SIZE;
8353         else
8354                 return -EIO;
8355
8356         buf = kmalloc(size, GFP_KERNEL);
8357         if (buf == NULL)
8358                 return -ENOMEM;
8359
8360         err = -EIO;
8361         for (i = 0, j = 0; i < size; i += 4, j++) {
8362                 u32 val;
8363
8364                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8365                         break;
8366                 buf[j] = cpu_to_le32(val);
8367         }
8368         if (i < size)
8369                 goto out;
8370
8371         /* Selfboot format */
8372         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
8373             TG3_EEPROM_MAGIC_FW) {
8374                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8375
8376                 for (i = 0; i < size; i++)
8377                         csum8 += buf8[i];
8378
8379                 if (csum8 == 0) {
8380                         err = 0;
8381                         goto out;
8382                 }
8383
8384                 err = -EIO;
8385                 goto out;
8386         }
8387
8388         if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
8389             TG3_EEPROM_MAGIC_HW) {
8390                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8391                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8392                 u8 *buf8 = (u8 *) buf;
8393                 int j, k;
8394
8395                 /* Separate the parity bits and the data bytes.  */
8396                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8397                         if ((i == 0) || (i == 8)) {
8398                                 int l;
8399                                 u8 msk;
8400
8401                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8402                                         parity[k++] = buf8[i] & msk;
8403                                 i++;
8404                         }
8405                         else if (i == 16) {
8406                                 int l;
8407                                 u8 msk;
8408
8409                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8410                                         parity[k++] = buf8[i] & msk;
8411                                 i++;
8412
8413                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8414                                         parity[k++] = buf8[i] & msk;
8415                                 i++;
8416                         }
8417                         data[j++] = buf8[i];
8418                 }
8419
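                /* Each data byte combined with its stored parity bit must
                 * have odd parity.
                 */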
8420                 err = -EIO;
8421                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8422                         u8 hw8 = hweight8(data[i]);
8423
8424                         if ((hw8 & 0x1) && parity[i])
8425                                 goto out;
8426                         else if (!(hw8 & 0x1) && !parity[i])
8427                                 goto out;
8428                 }
8429                 err = 0;
8430                 goto out;
8431         }
8432
8433         /* Bootstrap checksum at offset 0x10 */
8434         csum = calc_crc((unsigned char *) buf, 0x10);
8435         if (csum != cpu_to_le32(buf[0x10/4]))
8436                 goto out;
8437
8438         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8439         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8440         if (csum != cpu_to_le32(buf[0xfc/4]))
8441                 goto out;
8442
8443         err = 0;
8444
8445 out:
8446         kfree(buf);
8447         return err;
8448 }
8449
8450 #define TG3_SERDES_TIMEOUT_SEC  2
8451 #define TG3_COPPER_TIMEOUT_SEC  6
8452
8453 static int tg3_test_link(struct tg3 *tp)
8454 {
8455         int i, max;
8456
8457         if (!netif_running(tp->dev))
8458                 return -ENODEV;
8459
8460         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8461                 max = TG3_SERDES_TIMEOUT_SEC;
8462         else
8463                 max = TG3_COPPER_TIMEOUT_SEC;
8464
8465         for (i = 0; i < max; i++) {
8466                 if (netif_carrier_ok(tp->dev))
8467                         return 0;
8468
8469                 if (msleep_interruptible(1000))
8470                         break;
8471         }
8472
8473         return -EIO;
8474 }
8475
8476 /* Only test the commonly used registers */
8477 static int tg3_test_registers(struct tg3 *tp)
8478 {
8479         int i, is_5705, is_5750;
8480         u32 offset, read_mask, write_mask, val, save_val, read_val;
8481         static struct {
8482                 u16 offset;
8483                 u16 flags;
8484 #define TG3_FL_5705     0x1
8485 #define TG3_FL_NOT_5705 0x2
8486 #define TG3_FL_NOT_5788 0x4
8487 #define TG3_FL_NOT_5750 0x8
8488                 u32 read_mask;
8489                 u32 write_mask;
8490         } reg_tbl[] = {
8491                 /* MAC Control Registers */
8492                 { MAC_MODE, TG3_FL_NOT_5705,
8493                         0x00000000, 0x00ef6f8c },
8494                 { MAC_MODE, TG3_FL_5705,
8495                         0x00000000, 0x01ef6b8c },
8496                 { MAC_STATUS, TG3_FL_NOT_5705,
8497                         0x03800107, 0x00000000 },
8498                 { MAC_STATUS, TG3_FL_5705,
8499                         0x03800100, 0x00000000 },
8500                 { MAC_ADDR_0_HIGH, 0x0000,
8501                         0x00000000, 0x0000ffff },
8502                 { MAC_ADDR_0_LOW, 0x0000,
8503                         0x00000000, 0xffffffff },
8504                 { MAC_RX_MTU_SIZE, 0x0000,
8505                         0x00000000, 0x0000ffff },
8506                 { MAC_TX_MODE, 0x0000,
8507                         0x00000000, 0x00000070 },
8508                 { MAC_TX_LENGTHS, 0x0000,
8509                         0x00000000, 0x00003fff },
8510                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8511                         0x00000000, 0x000007fc },
8512                 { MAC_RX_MODE, TG3_FL_5705,
8513                         0x00000000, 0x000007dc },
8514                 { MAC_HASH_REG_0, 0x0000,
8515                         0x00000000, 0xffffffff },
8516                 { MAC_HASH_REG_1, 0x0000,
8517                         0x00000000, 0xffffffff },
8518                 { MAC_HASH_REG_2, 0x0000,
8519                         0x00000000, 0xffffffff },
8520                 { MAC_HASH_REG_3, 0x0000,
8521                         0x00000000, 0xffffffff },
8522
8523                 /* Receive Data and Receive BD Initiator Control Registers. */
8524                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8525                         0x00000000, 0xffffffff },
8526                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8527                         0x00000000, 0xffffffff },
8528                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8529                         0x00000000, 0x00000003 },
8530                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8531                         0x00000000, 0xffffffff },
8532                 { RCVDBDI_STD_BD+0, 0x0000,
8533                         0x00000000, 0xffffffff },
8534                 { RCVDBDI_STD_BD+4, 0x0000,
8535                         0x00000000, 0xffffffff },
8536                 { RCVDBDI_STD_BD+8, 0x0000,
8537                         0x00000000, 0xffff0002 },
8538                 { RCVDBDI_STD_BD+0xc, 0x0000,
8539                         0x00000000, 0xffffffff },
8540
8541                 /* Receive BD Initiator Control Registers. */
8542                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8543                         0x00000000, 0xffffffff },
8544                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8545                         0x00000000, 0x000003ff },
8546                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8547                         0x00000000, 0xffffffff },
8548
8549                 /* Host Coalescing Control Registers. */
8550                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8551                         0x00000000, 0x00000004 },
8552                 { HOSTCC_MODE, TG3_FL_5705,
8553                         0x00000000, 0x000000f6 },
8554                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8555                         0x00000000, 0xffffffff },
8556                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8557                         0x00000000, 0x000003ff },
8558                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8559                         0x00000000, 0xffffffff },
8560                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8561                         0x00000000, 0x000003ff },
8562                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8563                         0x00000000, 0xffffffff },
8564                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8565                         0x00000000, 0x000000ff },
8566                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8567                         0x00000000, 0xffffffff },
8568                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8569                         0x00000000, 0x000000ff },
8570                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8571                         0x00000000, 0xffffffff },
8572                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8573                         0x00000000, 0xffffffff },
8574                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8575                         0x00000000, 0xffffffff },
8576                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8577                         0x00000000, 0x000000ff },
8578                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8579                         0x00000000, 0xffffffff },
8580                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8581                         0x00000000, 0x000000ff },
8582                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8583                         0x00000000, 0xffffffff },
8584                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8585                         0x00000000, 0xffffffff },
8586                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8587                         0x00000000, 0xffffffff },
8588                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8589                         0x00000000, 0xffffffff },
8590                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8591                         0x00000000, 0xffffffff },
8592                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8593                         0xffffffff, 0x00000000 },
8594                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8595                         0xffffffff, 0x00000000 },
8596
8597                 /* Buffer Manager Control Registers. */
8598                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8599                         0x00000000, 0x007fff80 },
8600                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8601                         0x00000000, 0x007fffff },
8602                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8603                         0x00000000, 0x0000003f },
8604                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8605                         0x00000000, 0x000001ff },
8606                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8607                         0x00000000, 0x000001ff },
8608                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8609                         0xffffffff, 0x00000000 },
8610                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8611                         0xffffffff, 0x00000000 },
8612
8613                 /* Mailbox Registers */
8614                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8615                         0x00000000, 0x000001ff },
8616                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8617                         0x00000000, 0x000001ff },
8618                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8619                         0x00000000, 0x000007ff },
8620                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8621                         0x00000000, 0x000001ff },
8622
8623                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8624         };
8625
8626         is_5705 = is_5750 = 0;
8627         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8628                 is_5705 = 1;
8629                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8630                         is_5750 = 1;
8631         }
8632
8633         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8634                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8635                         continue;
8636
8637                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8638                         continue;
8639
8640                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8641                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8642                         continue;
8643
8644                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8645                         continue;
8646
8647                 offset = (u32) reg_tbl[i].offset;
8648                 read_mask = reg_tbl[i].read_mask;
8649                 write_mask = reg_tbl[i].write_mask;
8650
8651                 /* Save the original register content */
8652                 save_val = tr32(offset);
8653
8654                 /* Determine the read-only value. */
8655                 read_val = save_val & read_mask;
8656
8657                 /* Write zero to the register, then make sure the read-only bits
8658                  * are not changed and the read/write bits are all zeros.
8659                  */
8660                 tw32(offset, 0);
8661
8662                 val = tr32(offset);
8663
8664                 /* Test the read-only and read/write bits. */
8665                 if (((val & read_mask) != read_val) || (val & write_mask))
8666                         goto out;
8667
8668                 /* Write ones to all the bits defined by RdMask and WrMask, then
8669                  * make sure the read-only bits are not changed and the
8670                  * read/write bits are all ones.
8671                  */
8672                 tw32(offset, read_mask | write_mask);
8673
8674                 val = tr32(offset);
8675
8676                 /* Test the read-only bits. */
8677                 if ((val & read_mask) != read_val)
8678                         goto out;
8679
8680                 /* Test the read/write bits. */
8681                 if ((val & write_mask) != write_mask)
8682                         goto out;
8683
8684                 tw32(offset, save_val);
8685         }
8686
8687         return 0;
8688
8689 out:
8690         if (netif_msg_hw(tp))
8691                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
8692                        offset);
8693         tw32(offset, save_val);
8694         return -EIO;
8695 }
8696
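/* Write each test pattern across the given region of internal NIC memory
 * via tg3_write_mem() and read it back with tg3_read_mem(), failing on the
 * first mismatch.
 */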
8697 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8698 {
8699         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8700         int i;
8701         u32 j;
8702
8703         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8704                 for (j = 0; j < len; j += 4) {
8705                         u32 val;
8706
8707                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8708                         tg3_read_mem(tp, offset + j, &val);
8709                         if (val != test_pattern[i])
8710                                 return -EIO;
8711                 }
8712         }
8713         return 0;
8714 }
8715
8716 static int tg3_test_memory(struct tg3 *tp)
8717 {
8718         static struct mem_entry {
8719                 u32 offset;
8720                 u32 len;
8721         } mem_tbl_570x[] = {
8722                 { 0x00000000, 0x00b50},
8723                 { 0x00002000, 0x1c000},
8724                 { 0xffffffff, 0x00000}
8725         }, mem_tbl_5705[] = {
8726                 { 0x00000100, 0x0000c},
8727                 { 0x00000200, 0x00008},
8728                 { 0x00004000, 0x00800},
8729                 { 0x00006000, 0x01000},
8730                 { 0x00008000, 0x02000},
8731                 { 0x00010000, 0x0e000},
8732                 { 0xffffffff, 0x00000}
8733         }, mem_tbl_5755[] = {
8734                 { 0x00000200, 0x00008},
8735                 { 0x00004000, 0x00800},
8736                 { 0x00006000, 0x00800},
8737                 { 0x00008000, 0x02000},
8738                 { 0x00010000, 0x0c000},
8739                 { 0xffffffff, 0x00000}
8740         }, mem_tbl_5906[] = {
8741                 { 0x00000200, 0x00008},
8742                 { 0x00004000, 0x00400},
8743                 { 0x00006000, 0x00400},
8744                 { 0x00008000, 0x01000},
8745                 { 0x00010000, 0x01000},
8746                 { 0xffffffff, 0x00000}
8747         };
8748         struct mem_entry *mem_tbl;
8749         int err = 0;
8750         int i;
8751
8752         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8753                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8754                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8755                         mem_tbl = mem_tbl_5755;
8756                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8757                         mem_tbl = mem_tbl_5906;
8758                 else
8759                         mem_tbl = mem_tbl_5705;
8760         } else
8761                 mem_tbl = mem_tbl_570x;
8762
8763         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8764                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8765                     mem_tbl[i].len)) != 0)
8766                         break;
8767         }
8768
8769         return err;
8770 }
8771
8772 #define TG3_MAC_LOOPBACK        0
8773 #define TG3_PHY_LOOPBACK        1
8774
8775 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8776 {
8777         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8778         u32 desc_idx;
8779         struct sk_buff *skb, *rx_skb;
8780         u8 *tx_data;
8781         dma_addr_t map;
8782         int num_pkts, tx_len, rx_len, i, err;
8783         struct tg3_rx_buffer_desc *desc;
8784
8785         if (loopback_mode == TG3_MAC_LOOPBACK) {
8786                 /* HW erratum: MAC loopback fails in some cases on 5780.
8787                  * Normal traffic and PHY loopback are not affected by
8788                  * this erratum.
8789                  */
8790                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8791                         return 0;
8792
8793                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8794                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8795                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8796                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8797                 else
8798                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8799                 tw32(MAC_MODE, mac_mode);
8800         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8801                 u32 val;
8802
8803                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8804                         u32 phytest;
8805
8806                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
8807                                 u32 phy;
8808
8809                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
8810                                              phytest | MII_TG3_EPHY_SHADOW_EN);
8811                                 if (!tg3_readphy(tp, 0x1b, &phy))
8812                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
8813                                 if (!tg3_readphy(tp, 0x10, &phy))
8814                                         tg3_writephy(tp, 0x10, phy & ~0x4000);
8815                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
8816                         }
8817                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
8818                 } else
8819                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
8820
8821                 tg3_writephy(tp, MII_BMCR, val);
8822                 udelay(40);
8823
8824                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8825                            MAC_MODE_LINK_POLARITY;
8826                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8827                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
8828                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8829                 } else
8830                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8831
8832                 /* reset to prevent losing 1st rx packet intermittently */
8833                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8834                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8835                         udelay(10);
8836                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8837                 }
8838                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8839                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8840                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8841                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8842                 }
8843                 tw32(MAC_MODE, mac_mode);
8844         }
8845         else
8846                 return -EINVAL;
8847
8848         err = -EIO;
8849
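        /* Build a frame addressed to ourselves with a known byte pattern,
         * queue it on the send ring, then poll the status block until the
         * frame reappears on the standard rx return ring and verify that
         * the payload survived the round trip.
         */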
8850         tx_len = 1514;
8851         skb = netdev_alloc_skb(tp->dev, tx_len);
8852         if (!skb)
8853                 return -ENOMEM;
8854
8855         tx_data = skb_put(skb, tx_len);
8856         memcpy(tx_data, tp->dev->dev_addr, 6);
8857         memset(tx_data + 6, 0x0, 8);
8858
8859         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8860
8861         for (i = 14; i < tx_len; i++)
8862                 tx_data[i] = (u8) (i & 0xff);
8863
8864         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8865
8866         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8867              HOSTCC_MODE_NOW);
8868
8869         udelay(10);
8870
8871         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8872
8873         num_pkts = 0;
8874
8875         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8876
8877         tp->tx_prod++;
8878         num_pkts++;
8879
8880         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8881                      tp->tx_prod);
8882         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8883
8884         udelay(10);
8885
8886         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8887         for (i = 0; i < 25; i++) {
8888                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8889                        HOSTCC_MODE_NOW);
8890
8891                 udelay(10);
8892
8893                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8894                 rx_idx = tp->hw_status->idx[0].rx_producer;
8895                 if ((tx_idx == tp->tx_prod) &&
8896                     (rx_idx == (rx_start_idx + num_pkts)))
8897                         break;
8898         }
8899
8900         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8901         dev_kfree_skb(skb);
8902
8903         if (tx_idx != tp->tx_prod)
8904                 goto out;
8905
8906         if (rx_idx != rx_start_idx + num_pkts)
8907                 goto out;
8908
8909         desc = &tp->rx_rcb[rx_start_idx];
8910         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8911         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8912         if (opaque_key != RXD_OPAQUE_RING_STD)
8913                 goto out;
8914
8915         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8916             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8917                 goto out;
8918
8919         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8920         if (rx_len != tx_len)
8921                 goto out;
8922
8923         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8924
8925         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8926         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8927
8928         for (i = 14; i < tx_len; i++) {
8929                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8930                         goto out;
8931         }
8932         err = 0;
8933
8934         /* tg3_free_rings will unmap and free the rx_skb */
8935 out:
8936         return err;
8937 }
8938
8939 #define TG3_MAC_LOOPBACK_FAILED         1
8940 #define TG3_PHY_LOOPBACK_FAILED         2
8941 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8942                                          TG3_PHY_LOOPBACK_FAILED)
8943
8944 static int tg3_test_loopback(struct tg3 *tp)
8945 {
8946         int err = 0;
8947
8948         if (!netif_running(tp->dev))
8949                 return TG3_LOOPBACK_FAILED;
8950
8951         err = tg3_reset_hw(tp, 1);
8952         if (err)
8953                 return TG3_LOOPBACK_FAILED;
8954
8955         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8956                 err |= TG3_MAC_LOOPBACK_FAILED;
8957         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8958                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8959                         err |= TG3_PHY_LOOPBACK_FAILED;
8960         }
8961
8962         return err;
8963 }
8964
8965 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8966                           u64 *data)
8967 {
8968         struct tg3 *tp = netdev_priv(dev);
8969
8970         if (tp->link_config.phy_is_low_power)
8971                 tg3_set_power_state(tp, PCI_D0);
8972
8973         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8974
8975         if (tg3_test_nvram(tp) != 0) {
8976                 etest->flags |= ETH_TEST_FL_FAILED;
8977                 data[0] = 1;
8978         }
8979         if (tg3_test_link(tp) != 0) {
8980                 etest->flags |= ETH_TEST_FL_FAILED;
8981                 data[1] = 1;
8982         }
8983         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8984                 int err, irq_sync = 0;
8985
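                /* Offline tests need exclusive access to the hardware: stop
                 * the interface, take the full lock, halt the chip and park
                 * the on-chip CPUs before exercising registers, memory and
                 * loopback.
                 */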
8986                 if (netif_running(dev)) {
8987                         tg3_netif_stop(tp);
8988                         irq_sync = 1;
8989                 }
8990
8991                 tg3_full_lock(tp, irq_sync);
8992
8993                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8994                 err = tg3_nvram_lock(tp);
8995                 tg3_halt_cpu(tp, RX_CPU_BASE);
8996                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8997                         tg3_halt_cpu(tp, TX_CPU_BASE);
8998                 if (!err)
8999                         tg3_nvram_unlock(tp);
9000
9001                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9002                         tg3_phy_reset(tp);
9003
9004                 if (tg3_test_registers(tp) != 0) {
9005                         etest->flags |= ETH_TEST_FL_FAILED;
9006                         data[2] = 1;
9007                 }
9008                 if (tg3_test_memory(tp) != 0) {
9009                         etest->flags |= ETH_TEST_FL_FAILED;
9010                         data[3] = 1;
9011                 }
9012                 if ((data[4] = tg3_test_loopback(tp)) != 0)
9013                         etest->flags |= ETH_TEST_FL_FAILED;
9014
9015                 tg3_full_unlock(tp);
9016
9017                 if (tg3_test_interrupt(tp) != 0) {
9018                         etest->flags |= ETH_TEST_FL_FAILED;
9019                         data[5] = 1;
9020                 }
9021
9022                 tg3_full_lock(tp, 0);
9023
9024                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9025                 if (netif_running(dev)) {
9026                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9027                         if (!tg3_restart_hw(tp, 1))
9028                                 tg3_netif_start(tp);
9029                 }
9030
9031                 tg3_full_unlock(tp);
9032         }
9033         if (tp->link_config.phy_is_low_power)
9034                 tg3_set_power_state(tp, PCI_D3hot);
9035
9036 }
9037
9038 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9039 {
9040         struct mii_ioctl_data *data = if_mii(ifr);
9041         struct tg3 *tp = netdev_priv(dev);
9042         int err;
9043
9044         switch (cmd) {
9045         case SIOCGMIIPHY:
9046                 data->phy_id = PHY_ADDR;
9047
9048                 /* fallthru */
9049         case SIOCGMIIREG: {
9050                 u32 mii_regval;
9051
9052                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9053                         break;                  /* We have no PHY */
9054
9055                 if (tp->link_config.phy_is_low_power)
9056                         return -EAGAIN;
9057
9058                 spin_lock_bh(&tp->lock);
9059                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9060                 spin_unlock_bh(&tp->lock);
9061
9062                 data->val_out = mii_regval;
9063
9064                 return err;
9065         }
9066
9067         case SIOCSMIIREG:
9068                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9069                         break;                  /* We have no PHY */
9070
9071                 if (!capable(CAP_NET_ADMIN))
9072                         return -EPERM;
9073
9074                 if (tp->link_config.phy_is_low_power)
9075                         return -EAGAIN;
9076
9077                 spin_lock_bh(&tp->lock);
9078                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9079                 spin_unlock_bh(&tp->lock);
9080
9081                 return err;
9082
9083         default:
9084                 /* do nothing */
9085                 break;
9086         }
9087         return -EOPNOTSUPP;
9088 }
9089
9090 #if TG3_VLAN_TAG_USED
9091 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9092 {
9093         struct tg3 *tp = netdev_priv(dev);
9094
9095         if (netif_running(dev))
9096                 tg3_netif_stop(tp);
9097
9098         tg3_full_lock(tp, 0);
9099
9100         tp->vlgrp = grp;
9101
9102         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9103         __tg3_set_rx_mode(dev);
9104
9105         tg3_full_unlock(tp);
9106
9107         if (netif_running(dev))
9108                 tg3_netif_start(tp);
9109 }
9110
9111 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
9112 {
9113         struct tg3 *tp = netdev_priv(dev);
9114
9115         if (netif_running(dev))
9116                 tg3_netif_stop(tp);
9117
9118         tg3_full_lock(tp, 0);
9119         if (tp->vlgrp)
9120                 tp->vlgrp->vlan_devices[vid] = NULL;
9121         tg3_full_unlock(tp);
9122
9123         if (netif_running(dev))
9124                 tg3_netif_start(tp);
9125 }
9126 #endif
9127
9128 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9129 {
9130         struct tg3 *tp = netdev_priv(dev);
9131
9132         memcpy(ec, &tp->coal, sizeof(*ec));
9133         return 0;
9134 }
9135
9136 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9137 {
9138         struct tg3 *tp = netdev_priv(dev);
9139         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9140         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9141
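        /* Pre-5705 parts expose per-IRQ coalescing ticks and a statistics
         * block interval; on 5705 and newer the limits below stay at zero,
         * so any non-zero request for those fields fails the range check.
         */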
9142         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9143                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9144                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9145                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9146                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9147         }
9148
9149         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9150             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9151             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9152             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9153             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9154             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9155             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9156             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9157             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9158             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9159                 return -EINVAL;
9160
9161         /* No rx interrupts will be generated if both are zero */
9162         if ((ec->rx_coalesce_usecs == 0) &&
9163             (ec->rx_max_coalesced_frames == 0))
9164                 return -EINVAL;
9165
9166         /* No tx interrupts will be generated if both are zero */
9167         if ((ec->tx_coalesce_usecs == 0) &&
9168             (ec->tx_max_coalesced_frames == 0))
9169                 return -EINVAL;
9170
9171         /* Only copy relevant parameters, ignore all others. */
9172         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9173         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9174         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9175         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9176         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9177         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9178         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9179         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9180         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9181
9182         if (netif_running(dev)) {
9183                 tg3_full_lock(tp, 0);
9184                 __tg3_set_coalesce(tp, &tp->coal);
9185                 tg3_full_unlock(tp);
9186         }
9187         return 0;
9188 }
9189
9190 static const struct ethtool_ops tg3_ethtool_ops = {
9191         .get_settings           = tg3_get_settings,
9192         .set_settings           = tg3_set_settings,
9193         .get_drvinfo            = tg3_get_drvinfo,
9194         .get_regs_len           = tg3_get_regs_len,
9195         .get_regs               = tg3_get_regs,
9196         .get_wol                = tg3_get_wol,
9197         .set_wol                = tg3_set_wol,
9198         .get_msglevel           = tg3_get_msglevel,
9199         .set_msglevel           = tg3_set_msglevel,
9200         .nway_reset             = tg3_nway_reset,
9201         .get_link               = ethtool_op_get_link,
9202         .get_eeprom_len         = tg3_get_eeprom_len,
9203         .get_eeprom             = tg3_get_eeprom,
9204         .set_eeprom             = tg3_set_eeprom,
9205         .get_ringparam          = tg3_get_ringparam,
9206         .set_ringparam          = tg3_set_ringparam,
9207         .get_pauseparam         = tg3_get_pauseparam,
9208         .set_pauseparam         = tg3_set_pauseparam,
9209         .get_rx_csum            = tg3_get_rx_csum,
9210         .set_rx_csum            = tg3_set_rx_csum,
9211         .get_tx_csum            = ethtool_op_get_tx_csum,
9212         .set_tx_csum            = tg3_set_tx_csum,
9213         .get_sg                 = ethtool_op_get_sg,
9214         .set_sg                 = ethtool_op_set_sg,
9215 #if TG3_TSO_SUPPORT != 0
9216         .get_tso                = ethtool_op_get_tso,
9217         .set_tso                = tg3_set_tso,
9218 #endif
9219         .self_test_count        = tg3_get_test_count,
9220         .self_test              = tg3_self_test,
9221         .get_strings            = tg3_get_strings,
9222         .phys_id                = tg3_phys_id,
9223         .get_stats_count        = tg3_get_stats_count,
9224         .get_ethtool_stats      = tg3_get_ethtool_stats,
9225         .get_coalesce           = tg3_get_coalesce,
9226         .set_coalesce           = tg3_set_coalesce,
9227         .get_perm_addr          = ethtool_op_get_perm_addr,
9228 };
9229
9230 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9231 {
9232         u32 cursize, val, magic;
9233
9234         tp->nvram_size = EEPROM_CHIP_SIZE;
9235
9236         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9237                 return;
9238
9239         if ((magic != TG3_EEPROM_MAGIC) &&
9240             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9241             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9242                 return;
9243
9244         /*
9245          * Size the chip by reading offsets at increasing powers of two.
9246          * When we encounter our validation signature, we know the addressing
9247          * has wrapped around, and thus have our chip size.
9248          */
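        /* Purely illustrative walk-through: on a 16 KB (0x4000-byte) part,
         * the reads at 0x10, 0x20, ..., 0x2000 return ordinary data, while
         * the read at 0x4000 wraps back to offset 0 and returns the magic
         * word, so cursize ends up as 0x4000.
         */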
9249         cursize = 0x10;
9250
9251         while (cursize < tp->nvram_size) {
9252                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9253                         return;
9254
9255                 if (val == magic)
9256                         break;
9257
9258                 cursize <<= 1;
9259         }
9260
9261         tp->nvram_size = cursize;
9262 }
9263
9264 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9265 {
9266         u32 val;
9267
9268         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9269                 return;
9270
9271         /* Selfboot format */
9272         if (val != TG3_EEPROM_MAGIC) {
9273                 tg3_get_eeprom_size(tp);
9274                 return;
9275         }
9276
9277         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9278                 if (val != 0) {
9279                         tp->nvram_size = (val >> 16) * 1024;
9280                         return;
9281                 }
9282         }
9283         tp->nvram_size = 0x20000;
9284 }
9285
9286 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9287 {
9288         u32 nvcfg1;
9289
9290         nvcfg1 = tr32(NVRAM_CFG1);
9291         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9292                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9293         }
9294         else {
9295                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9296                 tw32(NVRAM_CFG1, nvcfg1);
9297         }
9298
9299         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9300             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9301                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9302                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9303                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9304                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9305                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9306                                 break;
9307                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9308                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9309                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9310                                 break;
9311                         case FLASH_VENDOR_ATMEL_EEPROM:
9312                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9313                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9314                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9315                                 break;
9316                         case FLASH_VENDOR_ST:
9317                                 tp->nvram_jedecnum = JEDEC_ST;
9318                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9319                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9320                                 break;
9321                         case FLASH_VENDOR_SAIFUN:
9322                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9323                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9324                                 break;
9325                         case FLASH_VENDOR_SST_SMALL:
9326                         case FLASH_VENDOR_SST_LARGE:
9327                                 tp->nvram_jedecnum = JEDEC_SST;
9328                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9329                                 break;
9330                 }
9331         }
9332         else {
9333                 tp->nvram_jedecnum = JEDEC_ATMEL;
9334                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9335                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9336         }
9337 }
9338
9339 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9340 {
9341         u32 nvcfg1;
9342
9343         nvcfg1 = tr32(NVRAM_CFG1);
9344
9345         /* NVRAM protection for TPM */
9346         if (nvcfg1 & (1 << 27))
9347                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9348
9349         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9350                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9351                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9352                         tp->nvram_jedecnum = JEDEC_ATMEL;
9353                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9354                         break;
9355                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9356                         tp->nvram_jedecnum = JEDEC_ATMEL;
9357                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9358                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9359                         break;
9360                 case FLASH_5752VENDOR_ST_M45PE10:
9361                 case FLASH_5752VENDOR_ST_M45PE20:
9362                 case FLASH_5752VENDOR_ST_M45PE40:
9363                         tp->nvram_jedecnum = JEDEC_ST;
9364                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9365                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9366                         break;
9367         }
9368
9369         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9370                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9371                         case FLASH_5752PAGE_SIZE_256:
9372                                 tp->nvram_pagesize = 256;
9373                                 break;
9374                         case FLASH_5752PAGE_SIZE_512:
9375                                 tp->nvram_pagesize = 512;
9376                                 break;
9377                         case FLASH_5752PAGE_SIZE_1K:
9378                                 tp->nvram_pagesize = 1024;
9379                                 break;
9380                         case FLASH_5752PAGE_SIZE_2K:
9381                                 tp->nvram_pagesize = 2048;
9382                                 break;
9383                         case FLASH_5752PAGE_SIZE_4K:
9384                                 tp->nvram_pagesize = 4096;
9385                                 break;
9386                         case FLASH_5752PAGE_SIZE_264:
9387                                 tp->nvram_pagesize = 264;
9388                                 break;
9389                 }
9390         }
9391         else {
9392                 /* For eeprom, set pagesize to maximum eeprom size */
9393                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9394
9395                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9396                 tw32(NVRAM_CFG1, nvcfg1);
9397         }
9398 }
9399
9400 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9401 {
9402         u32 nvcfg1;
9403
9404         nvcfg1 = tr32(NVRAM_CFG1);
9405
9406         /* NVRAM protection for TPM */
9407         if (nvcfg1 & (1 << 27))
9408                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9409
9410         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9411                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9412                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9413                         tp->nvram_jedecnum = JEDEC_ATMEL;
9414                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9415                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9416
9417                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9418                         tw32(NVRAM_CFG1, nvcfg1);
9419                         break;
9420                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9421                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9422                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9423                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9424                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9425                         tp->nvram_jedecnum = JEDEC_ATMEL;
9426                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9427                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9428                         tp->nvram_pagesize = 264;
9429                         break;
9430                 case FLASH_5752VENDOR_ST_M45PE10:
9431                 case FLASH_5752VENDOR_ST_M45PE20:
9432                 case FLASH_5752VENDOR_ST_M45PE40:
9433                         tp->nvram_jedecnum = JEDEC_ST;
9434                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9435                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9436                         tp->nvram_pagesize = 256;
9437                         break;
9438         }
9439 }
9440
9441 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9442 {
9443         u32 nvcfg1;
9444
9445         nvcfg1 = tr32(NVRAM_CFG1);
9446
9447         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9448                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9449                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9450                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9451                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9452                         tp->nvram_jedecnum = JEDEC_ATMEL;
9453                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9454                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9455
9456                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9457                         tw32(NVRAM_CFG1, nvcfg1);
9458                         break;
9459                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9460                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9461                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9462                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9463                         tp->nvram_jedecnum = JEDEC_ATMEL;
9464                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9465                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9466                         tp->nvram_pagesize = 264;
9467                         break;
9468                 case FLASH_5752VENDOR_ST_M45PE10:
9469                 case FLASH_5752VENDOR_ST_M45PE20:
9470                 case FLASH_5752VENDOR_ST_M45PE40:
9471                         tp->nvram_jedecnum = JEDEC_ST;
9472                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9473                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9474                         tp->nvram_pagesize = 256;
9475                         break;
9476         }
9477 }
9478
9479 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9480 {
9481         tp->nvram_jedecnum = JEDEC_ATMEL;
9482         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9483         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9484 }
9485
9486 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9487 static void __devinit tg3_nvram_init(struct tg3 *tp)
9488 {
9489         tw32_f(GRC_EEPROM_ADDR,
9490              (EEPROM_ADDR_FSM_RESET |
9491               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9492                EEPROM_ADDR_CLKPERD_SHIFT)));
9493
9494         msleep(1);
9495
9496         /* Enable seeprom accesses. */
9497         tw32_f(GRC_LOCAL_CTRL,
9498              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9499         udelay(100);
9500
9501         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9502             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9503                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9504
9505                 if (tg3_nvram_lock(tp)) {
9506                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9507                                "tg3_nvram_init failed.\n", tp->dev->name);
9508                         return;
9509                 }
9510                 tg3_enable_nvram_access(tp);
9511
9512                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9513                         tg3_get_5752_nvram_info(tp);
9514                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9515                         tg3_get_5755_nvram_info(tp);
9516                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9517                         tg3_get_5787_nvram_info(tp);
9518                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9519                         tg3_get_5906_nvram_info(tp);
9520                 else
9521                         tg3_get_nvram_info(tp);
9522
9523                 tg3_get_nvram_size(tp);
9524
9525                 tg3_disable_nvram_access(tp);
9526                 tg3_nvram_unlock(tp);
9527
9528         } else {
9529                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9530
9531                 tg3_get_eeprom_size(tp);
9532         }
9533 }
9534
9535 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9536                                         u32 offset, u32 *val)
9537 {
9538         u32 tmp;
9539         int i;
9540
9541         if (offset > EEPROM_ADDR_ADDR_MASK ||
9542             (offset % 4) != 0)
9543                 return -EINVAL;
9544
9545         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9546                                         EEPROM_ADDR_DEVID_MASK |
9547                                         EEPROM_ADDR_READ);
9548         tw32(GRC_EEPROM_ADDR,
9549              tmp |
9550              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9551              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9552               EEPROM_ADDR_ADDR_MASK) |
9553              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9554
9555         for (i = 0; i < 1000; i++) {
9556                 tmp = tr32(GRC_EEPROM_ADDR);
9557
9558                 if (tmp & EEPROM_ADDR_COMPLETE)
9559                         break;
9560                 msleep(1);
9561         }
9562         if (!(tmp & EEPROM_ADDR_COMPLETE))
9563                 return -EBUSY;
9564
9565         *val = tr32(GRC_EEPROM_DATA);
9566         return 0;
9567 }
9568
9569 #define NVRAM_CMD_TIMEOUT 10000
9570
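/* Poll for NVRAM_CMD_DONE in 10 usec steps; with NVRAM_CMD_TIMEOUT at
 * 10000 iterations this bounds the wait at roughly 100 ms before the
 * command is abandoned with -EBUSY.
 */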
9571 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9572 {
9573         int i;
9574
9575         tw32(NVRAM_CMD, nvram_cmd);
9576         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9577                 udelay(10);
9578                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9579                         udelay(10);
9580                         break;
9581                 }
9582         }
9583         if (i == NVRAM_CMD_TIMEOUT) {
9584                 return -EBUSY;
9585         }
9586         return 0;
9587 }
9588
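/* Atmel AT45DB flashes use 264-byte pages that the chip addresses on
 * power-of-two page boundaries, so a linear NVRAM offset is split into a
 * page index (shifted up by ATMEL_AT45DB0X1B_PAGE_POS) plus the byte
 * offset within the page.  As an illustration, with the 264-byte page
 * size a linear offset of 768 becomes page 2, byte 240.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */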
9589 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9590 {
9591         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9592             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9593             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9594             (tp->nvram_jedecnum == JEDEC_ATMEL))
9595
9596                 addr = ((addr / tp->nvram_pagesize) <<
9597                         ATMEL_AT45DB0X1B_PAGE_POS) +
9598                        (addr % tp->nvram_pagesize);
9599
9600         return addr;
9601 }
9602
9603 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9604 {
9605         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9606             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9607             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9608             (tp->nvram_jedecnum == JEDEC_ATMEL))
9609
9610                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9611                         tp->nvram_pagesize) +
9612                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9613
9614         return addr;
9615 }
9616
9617 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9618 {
9619         int ret;
9620
9621         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9622                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9623
9624         offset = tg3_nvram_phys_addr(tp, offset);
9625
9626         if (offset > NVRAM_ADDR_MSK)
9627                 return -EINVAL;
9628
9629         ret = tg3_nvram_lock(tp);
9630         if (ret)
9631                 return ret;
9632
9633         tg3_enable_nvram_access(tp);
9634
9635         tw32(NVRAM_ADDR, offset);
9636         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9637                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9638
9639         if (ret == 0)
9640                 *val = swab32(tr32(NVRAM_RDDATA));
9641
9642         tg3_disable_nvram_access(tp);
9643
9644         tg3_nvram_unlock(tp);
9645
9646         return ret;
9647 }
9648
9649 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9650 {
9651         int err;
9652         u32 tmp;
9653
9654         err = tg3_nvram_read(tp, offset, &tmp);
9655         *val = swab32(tmp);
9656         return err;
9657 }
9658
9659 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9660                                     u32 offset, u32 len, u8 *buf)
9661 {
9662         int i, j, rc = 0;
9663         u32 val;
9664
9665         for (i = 0; i < len; i += 4) {
9666                 u32 addr, data;
9667
9668                 addr = offset + i;
9669
9670                 memcpy(&data, buf + i, 4);
9671
9672                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9673
9674                 val = tr32(GRC_EEPROM_ADDR);
9675                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9676
9677                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9678                         EEPROM_ADDR_READ);
9679                 tw32(GRC_EEPROM_ADDR, val |
9680                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9681                         (addr & EEPROM_ADDR_ADDR_MASK) |
9682                         EEPROM_ADDR_START |
9683                         EEPROM_ADDR_WRITE);
9684
9685                 for (j = 0; j < 1000; j++) {
9686                         val = tr32(GRC_EEPROM_ADDR);
9687
9688                         if (val & EEPROM_ADDR_COMPLETE)
9689                                 break;
9690                         msleep(1);
9691                 }
9692                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9693                         rc = -EBUSY;
9694                         break;
9695                 }
9696         }
9697
9698         return rc;
9699 }
9700
9701 /* offset and length are dword aligned */
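/* Unbuffered flash parts can only be rewritten a full page at a time:
 * each pass reads the target page into a bounce buffer, merges in the
 * caller's data, issues a write-enable plus page erase, and then streams
 * the whole page back out one word at a time.
 */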
9702 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9703                 u8 *buf)
9704 {
9705         int ret = 0;
9706         u32 pagesize = tp->nvram_pagesize;
9707         u32 pagemask = pagesize - 1;
9708         u32 nvram_cmd;
9709         u8 *tmp;
9710
9711         tmp = kmalloc(pagesize, GFP_KERNEL);
9712         if (tmp == NULL)
9713                 return -ENOMEM;
9714
9715         while (len) {
9716                 int j;
9717                 u32 phy_addr, page_off, size;
9718
9719                 phy_addr = offset & ~pagemask;
9720
9721                 for (j = 0; j < pagesize; j += 4) {
9722                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9723                                                 (u32 *) (tmp + j))))
9724                                 break;
9725                 }
9726                 if (ret)
9727                         break;
9728
9729                 page_off = offset & pagemask;
9730                 size = pagesize;
9731                 if (len < size)
9732                         size = len;
9733
9734                 len -= size;
9735
9736                 memcpy(tmp + page_off, buf, size);
9737
9738                 offset = offset + (pagesize - page_off);
9739
9740                 tg3_enable_nvram_access(tp);
9741
9742                 /*
9743                  * Before we can erase the flash page, we need
9744                  * to issue a special "write enable" command.
9745                  */
9746                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9747
9748                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9749                         break;
9750
9751                 /* Erase the target page */
9752                 tw32(NVRAM_ADDR, phy_addr);
9753
9754                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9755                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9756
9757                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9758                         break;
9759
9760                 /* Issue another write enable to start the write. */
9761                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9762
9763                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9764                         break;
9765
9766                 for (j = 0; j < pagesize; j += 4) {
9767                         u32 data;
9768
9769                         data = *((u32 *) (tmp + j));
9770                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9771
9772                         tw32(NVRAM_ADDR, phy_addr + j);
9773
9774                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9775                                 NVRAM_CMD_WR;
9776
9777                         if (j == 0)
9778                                 nvram_cmd |= NVRAM_CMD_FIRST;
9779                         else if (j == (pagesize - 4))
9780                                 nvram_cmd |= NVRAM_CMD_LAST;
9781
9782                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9783                                 break;
9784                 }
9785                 if (ret)
9786                         break;
9787         }
9788
9789         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9790         tg3_nvram_exec_cmd(tp, nvram_cmd);
9791
9792         kfree(tmp);
9793
9794         return ret;
9795 }
9796
9797 /* offset and length are dword aligned */
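/* Buffered flash parts and plain EEPROMs accept word-at-a-time writes;
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracket each flash page (or each word
 * for EEPROM), and ST parts on chips other than the 5752/5755/5787 also
 * need a write-enable command at the start of every page.
 */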
9798 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9799                 u8 *buf)
9800 {
9801         int i, ret = 0;
9802
9803         for (i = 0; i < len; i += 4, offset += 4) {
9804                 u32 data, page_off, phy_addr, nvram_cmd;
9805
9806                 memcpy(&data, buf + i, 4);
9807                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9808
9809                 page_off = offset % tp->nvram_pagesize;
9810
9811                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9812
9813                 tw32(NVRAM_ADDR, phy_addr);
9814
9815                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9816
9817                 if ((page_off == 0) || (i == 0))
9818                         nvram_cmd |= NVRAM_CMD_FIRST;
9819                 if (page_off == (tp->nvram_pagesize - 4))
9820                         nvram_cmd |= NVRAM_CMD_LAST;
9821
9822                 if (i == (len - 4))
9823                         nvram_cmd |= NVRAM_CMD_LAST;
9824
9825                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9826                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9827                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9828                     (tp->nvram_jedecnum == JEDEC_ST) &&
9829                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9830
9831                         if ((ret = tg3_nvram_exec_cmd(tp,
9832                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9833                                 NVRAM_CMD_DONE)))
9834
9835                                 break;
9836                 }
9837                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9838                         /* We always do complete word writes to eeprom. */
9839                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9840                 }
9841
9842                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9843                         break;
9844         }
9845         return ret;
9846 }
9847
9848 /* offset and length are dword aligned */
9849 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9850 {
9851         int ret;
9852
9853         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9854                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9855                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9856                 udelay(40);
9857         }
9858
9859         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9860                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9861         }
9862         else {
9863                 u32 grc_mode;
9864
9865                 ret = tg3_nvram_lock(tp);
9866                 if (ret)
9867                         return ret;
9868
9869                 tg3_enable_nvram_access(tp);
9870                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9871                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9872                         tw32(NVRAM_WRITE1, 0x406);
9873
9874                 grc_mode = tr32(GRC_MODE);
9875                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9876
9877                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9878                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9879
9880                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9881                                 buf);
9882                 }
9883                 else {
9884                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9885                                 buf);
9886                 }
9887
9888                 grc_mode = tr32(GRC_MODE);
9889                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9890
9891                 tg3_disable_nvram_access(tp);
9892                 tg3_nvram_unlock(tp);
9893         }
9894
9895         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9896                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9897                 udelay(40);
9898         }
9899
9900         return ret;
9901 }
9902
9903 struct subsys_tbl_ent {
9904         u16 subsys_vendor, subsys_devid;
9905         u32 phy_id;
9906 };
9907
9908 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9909         /* Broadcom boards. */
9910         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9911         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9912         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9913         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9914         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9915         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9916         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9917         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9918         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9919         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9920         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9921
9922         /* 3com boards. */
9923         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9924         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9925         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9926         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9927         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9928
9929         /* DELL boards. */
9930         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9931         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9932         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9933         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9934
9935         /* Compaq boards. */
9936         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9937         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9938         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9939         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9940         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9941
9942         /* IBM boards. */
9943         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9944 };
9945
9946 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9947 {
9948         int i;
9949
9950         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9951                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9952                      tp->pdev->subsystem_vendor) &&
9953                     (subsys_id_to_phy_id[i].subsys_devid ==
9954                      tp->pdev->subsystem_device))
9955                         return &subsys_id_to_phy_id[i];
9956         }
9957         return NULL;
9958 }
9959
9960 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9961 {
9962         u32 val;
9963         u16 pmcsr;
9964
9965         /* On some early chips the SRAM cannot be accessed in D3hot state,
9966          * so we need to make sure we're in D0.
9967          */
9968         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9969         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9970         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9971         msleep(1);
9972
9973         /* Make sure register accesses (indirect or otherwise)
9974          * will function correctly.
9975          */
9976         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9977                                tp->misc_host_ctrl);
9978
9979         /* The memory arbiter has to be enabled in order for SRAM accesses
9980          * to succeed.  Normally on powerup the tg3 chip firmware will make
9981          * sure it is enabled, but other entities such as system netboot
9982          * code might disable it.
9983          */
9984         val = tr32(MEMARB_MODE);
9985         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9986
9987         tp->phy_id = PHY_ID_INVALID;
9988         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9989
9990         /* Assume an onboard device by default.  */
9991         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9992
9993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9994                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
9995                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9996                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
9997                 }
9998                 return;
9999         }
10000
10001         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10002         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10003                 u32 nic_cfg, led_cfg;
10004                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10005                 int eeprom_phy_serdes = 0;
10006
10007                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10008                 tp->nic_sram_data_cfg = nic_cfg;
10009
10010                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10011                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10012                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10013                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10014                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10015                     (ver > 0) && (ver < 0x100))
10016                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10017
10018                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10019                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10020                         eeprom_phy_serdes = 1;
10021
10022                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10023                 if (nic_phy_id != 0) {
10024                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10025                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10026
10027                         eeprom_phy_id  = (id1 >> 16) << 10;
10028                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10029                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10030                 } else
10031                         eeprom_phy_id = 0;
10032
10033                 tp->phy_id = eeprom_phy_id;
10034                 if (eeprom_phy_serdes) {
10035                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10036                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10037                         else
10038                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10039                 }
10040
10041                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10042                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10043                                     SHASTA_EXT_LED_MODE_MASK);
10044                 else
10045                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10046
10047                 switch (led_cfg) {
10048                 default:
10049                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10050                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10051                         break;
10052
10053                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10054                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10055                         break;
10056
10057                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10058                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10059
10060                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10061                          * read, as happens with some older 5700/5701 bootcode.
10062                          */
10063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10064                             ASIC_REV_5700 ||
10065                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10066                             ASIC_REV_5701)
10067                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10068
10069                         break;
10070
10071                 case SHASTA_EXT_LED_SHARED:
10072                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10073                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10074                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10075                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10076                                                  LED_CTRL_MODE_PHY_2);
10077                         break;
10078
10079                 case SHASTA_EXT_LED_MAC:
10080                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10081                         break;
10082
10083                 case SHASTA_EXT_LED_COMBO:
10084                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10085                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10086                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10087                                                  LED_CTRL_MODE_PHY_2);
10088                         break;
10089
10090                 }
10091
10092                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10093                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10094                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10095                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10096
10097                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10098                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10099                         if ((tp->pdev->subsystem_vendor ==
10100                              PCI_VENDOR_ID_ARIMA) &&
10101                             (tp->pdev->subsystem_device == 0x205a ||
10102                              tp->pdev->subsystem_device == 0x2063))
10103                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10104                 } else {
10105                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10106                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10107                 }
10108
10109                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10110                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10111                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10112                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10113                 }
10114                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
10115                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
10116
10117                 if (cfg2 & (1 << 17))
10118                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10119
10120                 /* serdes signal pre-emphasis in register 0x590 is set by
10121                  * the bootcode if bit 18 is set */
10122                 if (cfg2 & (1 << 18))
10123                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10124         }
10125 }
10126
10127 static int __devinit tg3_phy_probe(struct tg3 *tp)
10128 {
10129         u32 hw_phy_id_1, hw_phy_id_2;
10130         u32 hw_phy_id, hw_phy_id_masked;
10131         int err;
10132
10133         /* Reading the PHY ID register can conflict with ASF
10134          * firmware access to the PHY hardware.
10135          */
10136         err = 0;
10137         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
10138                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10139         } else {
10140                 /* Now read the physical PHY_ID from the chip and verify
10141                  * that it is sane.  If it doesn't look good, we fall back
10142                  * to the PHY_ID found in the eeprom area or, failing
10143                  * that, the hard-coded subsystem ID table.
10144                  */
10145                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10146                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10147
10148                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10149                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10150                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10151
10152                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10153         }
10154
10155         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10156                 tp->phy_id = hw_phy_id;
10157                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10158                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10159                 else
10160                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10161         } else {
10162                 if (tp->phy_id != PHY_ID_INVALID) {
10163                         /* Do nothing, phy ID already set up in
10164                          * tg3_get_eeprom_hw_cfg().
10165                          */
10166                 } else {
10167                         struct subsys_tbl_ent *p;
10168
10169                         /* No eeprom signature?  Try the hardcoded
10170                          * subsys device table.
10171                          */
10172                         p = lookup_by_subsys(tp);
10173                         if (!p)
10174                                 return -ENODEV;
10175
10176                         tp->phy_id = p->phy_id;
10177                         if (!tp->phy_id ||
10178                             tp->phy_id == PHY_ID_BCM8002)
10179                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10180                 }
10181         }
10182
10183         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10184             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10185                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10186
10187                 tg3_readphy(tp, MII_BMSR, &bmsr);
10188                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10189                     (bmsr & BMSR_LSTATUS))
10190                         goto skip_phy_reset;
10191
10192                 err = tg3_phy_reset(tp);
10193                 if (err)
10194                         return err;
10195
10196                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10197                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10198                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10199                 tg3_ctrl = 0;
10200                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10201                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10202                                     MII_TG3_CTRL_ADV_1000_FULL);
10203                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10204                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10205                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10206                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10207                 }
10208
10209                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10210                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10211                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10212                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10213                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10214
10215                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10216                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10217
10218                         tg3_writephy(tp, MII_BMCR,
10219                                      BMCR_ANENABLE | BMCR_ANRESTART);
10220                 }
10221                 tg3_phy_set_wirespeed(tp);
10222
10223                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10224                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10225                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10226         }
10227
10228 skip_phy_reset:
10229         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10230                 err = tg3_init_5401phy_dsp(tp);
10231                 if (err)
10232                         return err;
10233         }
10234
10235         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10236                 err = tg3_init_5401phy_dsp(tp);
10237         }
10238
10239         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10240                 tp->link_config.advertising =
10241                         (ADVERTISED_1000baseT_Half |
10242                          ADVERTISED_1000baseT_Full |
10243                          ADVERTISED_Autoneg |
10244                          ADVERTISED_FIBRE);
10245         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10246                 tp->link_config.advertising &=
10247                         ~(ADVERTISED_1000baseT_Half |
10248                           ADVERTISED_1000baseT_Full);
10249
10250         return err;
10251 }
10252
10253 static void __devinit tg3_read_partno(struct tg3 *tp)
10254 {
10255         unsigned char vpd_data[256];
10256         unsigned int i;
10257         u32 magic;
10258
10259         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10260                 goto out_not_found;
10261
10262         if (magic == TG3_EEPROM_MAGIC) {
10263                 for (i = 0; i < 256; i += 4) {
10264                         u32 tmp;
10265
10266                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10267                                 goto out_not_found;
10268
10269                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10270                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10271                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10272                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10273                 }
10274         } else {
10275                 int vpd_cap;
10276
10277                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10278                 for (i = 0; i < 256; i += 4) {
10279                         u32 tmp, j = 0;
10280                         u16 tmp16;
10281
10282                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10283                                               i);
10284                         while (j++ < 100) {
10285                                 pci_read_config_word(tp->pdev, vpd_cap +
10286                                                      PCI_VPD_ADDR, &tmp16);
10287                                 if (tmp16 & 0x8000)
10288                                         break;
10289                                 msleep(1);
10290                         }
10291                         if (!(tmp16 & 0x8000))
10292                                 goto out_not_found;
10293
10294                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10295                                               &tmp);
10296                         tmp = cpu_to_le32(tmp);
10297                         memcpy(&vpd_data[i], &tmp, 4);
10298                 }
10299         }
10300
10301         /* Now parse and find the part number. */
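        /* The VPD image is a sequence of tagged resources: 0x82 is the
         * identifier string, 0x91 the read-write area, and 0x90 the
         * read-only area that carries the part number under the
         * two-character keyword "PN".
         */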
10302         for (i = 0; i < 254; ) {
10303                 unsigned char val = vpd_data[i];
10304                 unsigned int block_end;
10305
10306                 if (val == 0x82 || val == 0x91) {
10307                         i = (i + 3 +
10308                              (vpd_data[i + 1] +
10309                               (vpd_data[i + 2] << 8)));
10310                         continue;
10311                 }
10312
10313                 if (val != 0x90)
10314                         goto out_not_found;
10315
10316                 block_end = (i + 3 +
10317                              (vpd_data[i + 1] +
10318                               (vpd_data[i + 2] << 8)));
10319                 i += 3;
10320
10321                 if (block_end > 256)
10322                         goto out_not_found;
10323
10324                 while (i < (block_end - 2)) {
10325                         if (vpd_data[i + 0] == 'P' &&
10326                             vpd_data[i + 1] == 'N') {
10327                                 int partno_len = vpd_data[i + 2];
10328
10329                                 i += 3;
10330                                 if (partno_len > 24 || (partno_len + i) > 256)
10331                                         goto out_not_found;
10332
10333                                 memcpy(tp->board_part_number,
10334                                        &vpd_data[i], partno_len);
10335
10336                                 /* Success. */
10337                                 return;
10338                         }
10339                         i += 3 + vpd_data[i + 2];
10340                 }
10341
10342                 /* Part number not found. */
10343                 goto out_not_found;
10344         }
10345
10346 out_not_found:
10347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10348                 strcpy(tp->board_part_number, "BCM95906");
10349         else
10350                 strcpy(tp->board_part_number, "none");
10351 }
10352
10353 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10354 {
10355         u32 val, offset, start;
10356
10357         if (tg3_nvram_read_swab(tp, 0, &val))
10358                 return;
10359
10360         if (val != TG3_EEPROM_MAGIC)
10361                 return;
10362
10363         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10364             tg3_nvram_read_swab(tp, 0x4, &start))
10365                 return;
10366
10367         offset = tg3_nvram_logical_addr(tp, offset);
10368         if (tg3_nvram_read_swab(tp, offset, &val))
10369                 return;
10370
10371         if ((val & 0xfc000000) == 0x0c000000) {
10372                 u32 ver_offset, addr;
10373                 int i;
10374
10375                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10376                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10377                         return;
10378
10379                 if (val != 0)
10380                         return;
10381
10382                 addr = offset + ver_offset - start;
10383                 for (i = 0; i < 16; i += 4) {
10384                         if (tg3_nvram_read(tp, addr + i, &val))
10385                                 return;
10386
10387                         val = cpu_to_le32(val);
10388                         memcpy(tp->fw_ver + i, &val, 4);
10389                 }
10390         }
10391 }
10392
10393 static int __devinit tg3_get_invariants(struct tg3 *tp)
10394 {
10395         static struct pci_device_id write_reorder_chipsets[] = {
10396                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10397                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10398                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10399                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10400                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10401                              PCI_DEVICE_ID_VIA_8385_0) },
10402                 { },
10403         };
10404         u32 misc_ctrl_reg;
10405         u32 cacheline_sz_reg;
10406         u32 pci_state_reg, grc_misc_cfg;
10407         u32 val;
10408         u16 pci_cmd;
10409         int err, pcie_cap;
10410
10411         /* Force memory write invalidate off.  If we leave it on,
10412          * then on 5700_BX chips we have to enable a workaround.
10413          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10414          * to match the cacheline size.  The Broadcom driver has this
10415          * workaround but turns MWI off all the time, so it never uses
10416          * it.  This seems to suggest that the workaround is insufficient.
10417          */
10418         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10419         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10420         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10421
10422         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10423          * has the register indirect write enable bit set before
10424          * we try to access any of the MMIO registers.  It is also
10425          * critical that the PCI-X hw workaround situation is decided
10426          * before that as well.
10427          */
10428         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10429                               &misc_ctrl_reg);
10430
10431         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10432                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10433
10434         /* Wrong chip ID in 5752 A0. This code can be removed later
10435          * as A0 is not in production.
10436          */
10437         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10438                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10439
10440         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10441          * we need to disable memory and use config. cycles
10442          * only to access all registers. The 5702/03 chips
10443          * can mistakenly decode the special cycles from the
10444          * ICH chipsets as memory write cycles, causing corruption
10445          * of register and memory space. Only certain ICH bridges
10446          * will drive special cycles with non-zero data during the
10447          * address phase which can fall within the 5703's address
10448          * range. This is not an ICH bug as the PCI spec allows
10449          * non-zero address during special cycles. However, only
10450          * these ICH bridges are known to drive non-zero addresses
10451          * during special cycles.
10452          *
10453          * Since special cycles do not cross PCI bridges, we only
10454          * enable this workaround if the 5703 is on the secondary
10455          * bus of these ICH bridges.
10456          */
10457         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10458             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10459                 static struct tg3_dev_id {
10460                         u32     vendor;
10461                         u32     device;
10462                         u32     rev;
10463                 } ich_chipsets[] = {
10464                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10465                           PCI_ANY_ID },
10466                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10467                           PCI_ANY_ID },
10468                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10469                           0xa },
10470                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10471                           PCI_ANY_ID },
10472                         { },
10473                 };
10474                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10475                 struct pci_dev *bridge = NULL;
10476
10477                 while (pci_id->vendor != 0) {
10478                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10479                                                 bridge);
10480                         if (!bridge) {
10481                                 pci_id++;
10482                                 continue;
10483                         }
10484                         if (pci_id->rev != PCI_ANY_ID) {
10485                                 u8 rev;
10486
10487                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10488                                                      &rev);
10489                                 if (rev > pci_id->rev)
10490                                         continue;
10491                         }
10492                         if (bridge->subordinate &&
10493                             (bridge->subordinate->number ==
10494                              tp->pdev->bus->number)) {
10495
10496                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10497                                 pci_dev_put(bridge);
10498                                 break;
10499                         }
10500                 }
10501         }
10502
10503         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10504          * DMA addresses > 40-bit. This bridge may have additional
10505          * 57xx devices behind it, for example in some 4-port NIC designs.
10506          * Any tg3 device found behind the bridge will also need the 40-bit
10507          * DMA workaround.
10508          */
10509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10511                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10512                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10513                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10514         }
10515         else {
10516                 struct pci_dev *bridge = NULL;
10517
10518                 do {
10519                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10520                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10521                                                 bridge);
10522                         if (bridge && bridge->subordinate &&
10523                             (bridge->subordinate->number <=
10524                              tp->pdev->bus->number) &&
10525                             (bridge->subordinate->subordinate >=
10526                              tp->pdev->bus->number)) {
10527                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10528                                 pci_dev_put(bridge);
10529                                 break;
10530                         }
10531                 } while (bridge);
10532         }
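        /* The bus-range test above treats the tg3 device as sitting behind
         * the ServerWorks EPB whenever its bus number falls inside the
         * bridge's secondary..subordinate window, i.e. anywhere in the bus
         * subtree the EPB forwards to, not just on its immediate secondary
         * bus.
         */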
10533
10534         /* Initialize misc host control in PCI block. */
10535         tp->misc_host_ctrl |= (misc_ctrl_reg &
10536                                MISC_HOST_CTRL_CHIPREV);
10537         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10538                                tp->misc_host_ctrl);
10539
10540         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10541                               &cacheline_sz_reg);
10542
10543         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10544         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10545         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10546         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
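        /* The fields unpacked here follow the standard PCI header dword at
         * config offset 0x0c (Cache Line Size / Latency Timer / Header Type /
         * BIST).  As a purely illustrative example, a value of 0x00004010
         * would decode to a cache line size of 0x10 dwords (64 bytes), a
         * latency timer of 0x40, a type 0 header and no BIST support.
         */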
10547
10548         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10553             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10554                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10555
10556         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10557             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10558                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
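        /* Note the flag hierarchy built up here: every 5780-class chip is
         * also marked 5750-plus, and every 5750-plus chip is also marked
         * 5705-plus, so later feature tests can check just the broadest
         * applicable flag.
         */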
10559
10560         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10561                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10562                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10563                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10564                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10565                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10566                 } else {
10567                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10568                                           TG3_FLG2_HW_TSO_1_BUG;
10569                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10570                                 ASIC_REV_5750 &&
10571                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10572                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10573                 }
10574         }
10575
10576         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10577             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10578             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10579             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10580             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10581             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10582                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10583
10584         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10585         if (pcie_cap != 0) {
10586                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10587                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10588                         u16 lnkctl;
10589
10590                         pci_read_config_word(tp->pdev,
10591                                              pcie_cap + PCI_EXP_LNKCTL,
10592                                              &lnkctl);
10593                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10594                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10595                 }
10596         }
10597
10598         /* If we have an AMD 762 or VIA K8T800 chipset, write
10599          * reordering to the mailbox registers done by the host
10600          * controller can cause major troubles.  We read back from
10601          * controller can cause major trouble.  We read back from
10602          * posted to the chip in order.
10603          */
10604         if (pci_dev_present(write_reorder_chipsets) &&
10605             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10606                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10607
10608         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10609             tp->pci_lat_timer < 64) {
10610                 tp->pci_lat_timer = 64;
10611
10612                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10613                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10614                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10615                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10616
10617                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10618                                        cacheline_sz_reg);
10619         }
10620
10621         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10622                               &pci_state_reg);
10623
10624         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10625                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10626
10627                 /* If this is a 5700 BX chipset, and we are in PCI-X
10628                  * mode, enable register write workaround.
10629                  *
10630                  * The workaround is to use indirect register accesses
10631                  * for all chip writes not to mailbox registers.
10632                  */
10633                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10634                         u32 pm_reg;
10635                         u16 pci_cmd;
10636
10637                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10638
10639                          /* The chip can have its power management PCI config
10640                          * space registers clobbered due to this bug.
10641                          * So explicitly force the chip into D0 here.
10642                          */
10643                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10644                                               &pm_reg);
10645                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10646                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10647                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10648                                                pm_reg);
10649
10650                         /* Also, force SERR#/PERR# in PCI command. */
10651                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10652                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10653                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10654                 }
10655         }
10656
10657         /* 5700 BX chips need to have their TX producer index mailboxes
10658          * written twice to work around a bug.
10659          */
10660         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10661                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10662
10663         /* Back-to-back register writes can cause problems on this chip;
10664          * the workaround is to read back all reg writes except those to
10665          * mailbox regs.  See tg3_write_indirect_reg32().
10666          *
10667          * PCI Express 5750_A0 rev chips need this workaround too.
10668          */
10669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10670             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10671              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10672                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10673
10674         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10675                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10676         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10677                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10678
10679         /* Chip-specific fixup from Broadcom driver */
10680         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10681             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10682                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10683                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10684         }
10685
10686         /* Default fast path register access methods */
10687         tp->read32 = tg3_read32;
10688         tp->write32 = tg3_write32;
10689         tp->read32_mbox = tg3_read32;
10690         tp->write32_mbox = tg3_write32;
10691         tp->write32_tx_mbox = tg3_write32;
10692         tp->write32_rx_mbox = tg3_write32;
10693
10694         /* Various workaround register access methods */
10695         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10696                 tp->write32 = tg3_write_indirect_reg32;
10697         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10698                 tp->write32 = tg3_write_flush_reg32;
10699
10700         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10701             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10702                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10703                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10704                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10705         }
10706
10707         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10708                 tp->read32 = tg3_read_indirect_reg32;
10709                 tp->write32 = tg3_write_indirect_reg32;
10710                 tp->read32_mbox = tg3_read_indirect_mbox;
10711                 tp->write32_mbox = tg3_write_indirect_mbox;
10712                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10713                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10714
10715                 iounmap(tp->regs);
10716                 tp->regs = NULL;
10717
10718                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10719                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10720                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10721         }
10722         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10723                 tp->read32_mbox = tg3_read32_mbox_5906;
10724                 tp->write32_mbox = tg3_write32_mbox_5906;
10725                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10726                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10727         }
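        /* Summary of the accessor selection above: plain MMIO is the default;
         * the PCI-X target bug forces indirect register writes and the 5701
         * write bug forces read-back flushes; the TXD mailbox bug and host
         * write reordering switch the TX (and, for reordering, RX) mailbox
         * writers to their workaround variants; the ICH workaround routes
         * every register and mailbox access through config-space indirection;
         * and the 5906 uses its own mailbox accessors.
         */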
10728
10729         if (tp->write32 == tg3_write_indirect_reg32 ||
10730             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10731              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10732               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10733                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10734
10735         /* Get eeprom hw config before calling tg3_set_power_state().
10736          * In particular, the TG3_FLG2_IS_NIC flag must be
10737          * determined before calling tg3_set_power_state() so that
10738          * we know whether or not to switch out of Vaux power.
10739          * When the flag is set, it means that GPIO1 is used for eeprom
10740          * write protect and also implies that it is a LOM where GPIOs
10741          * are not used to switch power.
10742          */
10743         tg3_get_eeprom_hw_cfg(tp);
10744
10745         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10746          * GPIO1 driven high will bring 5700's external PHY out of reset.
10747          * It is also used as eeprom write protect on LOMs.
10748          */
10749         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10750         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10751             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10752                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10753                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10754         /* Unused GPIO3 must be driven as output on 5752 because there
10755          * are no pull-up resistors on unused GPIO pins.
10756          */
10757         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10758                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10759
10760         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10761                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10762
10763         /* Force the chip into D0. */
10764         err = tg3_set_power_state(tp, PCI_D0);
10765         if (err) {
10766                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10767                        pci_name(tp->pdev));
10768                 return err;
10769         }
10770
10771         /* 5700 B0 chips do not support checksumming correctly due
10772          * to hardware bugs.
10773          */
10774         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10775                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10776
10777         /* Derive initial jumbo mode from MTU assigned in
10778          * ether_setup() via the alloc_etherdev() call
10779          */
10780         if (tp->dev->mtu > ETH_DATA_LEN &&
10781             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10782                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10783
10784         /* Determine WakeOnLan speed to use. */
10785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10786             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10787             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10788             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10789                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10790         } else {
10791                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10792         }
10793
10794         /* A few boards don't want Ethernet@WireSpeed phy feature */
10795         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10796             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10797              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10798              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10799             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10800             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10801                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10802
10803         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10804             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10805                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10806         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10807                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10808
10809         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10810                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10811                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10812                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10813                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10814                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10815                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10816                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10817         }
10818
10819         tp->coalesce_mode = 0;
10820         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10821             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10822                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10823
10824         /* Initialize MAC MI mode, polling disabled. */
10825         tw32_f(MAC_MI_MODE, tp->mi_mode);
10826         udelay(80);
10827
10828         /* Initialize data/descriptor byte/word swapping. */
10829         val = tr32(GRC_MODE);
10830         val &= GRC_MODE_HOST_STACKUP;
10831         tw32(GRC_MODE, val | tp->grc_mode);
10832
10833         tg3_switch_clocks(tp);
10834
10835         /* Clear this out for sanity. */
10836         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10837
10838         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10839                               &pci_state_reg);
10840         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10841             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10842                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10843
10844                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10845                     chiprevid == CHIPREV_ID_5701_B0 ||
10846                     chiprevid == CHIPREV_ID_5701_B2 ||
10847                     chiprevid == CHIPREV_ID_5701_B5) {
10848                         void __iomem *sram_base;
10849
10850                         /* Write some dummy words into the SRAM status block
10851                          * area and see if it reads back correctly.  If the return
10852                          * value is bad, force enable the PCIX workaround.
10853                          */
10854                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10855
10856                         writel(0x00000000, sram_base);
10857                         writel(0x00000000, sram_base + 4);
10858                         writel(0xffffffff, sram_base + 4);
10859                         if (readl(sram_base) != 0x00000000)
10860                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10861                 }
10862         }
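        /* If the 0xffffffff write above disturbed the first status block word
         * (it no longer reads back as zero), target-mode writes from this
         * host are evidently unreliable, so the PCI-X target workaround is
         * forced on even though the chip revision alone did not select it.
         */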
10863
10864         udelay(50);
10865         tg3_nvram_init(tp);
10866
10867         grc_misc_cfg = tr32(GRC_MISC_CFG);
10868         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10869
10870         /* Broadcom's driver says that CIOBE multisplit has a bug */
10871 #if 0
10872         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10873             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10874                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10875                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10876         }
10877 #endif
10878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10879             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10880              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10881                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10882
10883         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10884             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10885                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10886         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10887                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10888                                       HOSTCC_MODE_CLRTICK_TXBD);
10889
10890                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10891                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10892                                        tp->misc_host_ctrl);
10893         }
10894
10895         /* these are limited to 10/100 only */
10896         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10897              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10898             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10899              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10900              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10901               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10902               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10903             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10904              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10905               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
10906               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
10907             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10908                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10909
10910         err = tg3_phy_probe(tp);
10911         if (err) {
10912                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10913                        pci_name(tp->pdev), err);
10914                 /* ... but do not return immediately ... */
10915         }
10916
10917         tg3_read_partno(tp);
10918         tg3_read_fw_ver(tp);
10919
10920         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10921                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10922         } else {
10923                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10924                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10925                 else
10926                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10927         }
10928
10929         /* 5700 {AX,BX} chips have a broken status block link
10930          * change bit implementation, so we must use the
10931          * status register in those cases.
10932          */
10933         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10934                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10935         else
10936                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10937
10938         /* The led_ctrl is set during tg3_phy_probe; here we might
10939          * have to force the link status polling mechanism based
10940          * upon subsystem IDs.
10941          */
10942         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10943             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10944                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10945                                   TG3_FLAG_USE_LINKCHG_REG);
10946         }
10947
10948         /* For all SERDES we poll the MAC status register. */
10949         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10950                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10951         else
10952                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10953
10954         /* All chips before 5787 can get confused if TX buffers
10955          * straddle the 4GB address boundary in some cases.
10956          */
10957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10960                 tp->dev->hard_start_xmit = tg3_start_xmit;
10961         else
10962                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10963
10964         tp->rx_offset = 2;
10965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10966             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10967                 tp->rx_offset = 0;
10968
10969         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10970
10971         /* Increment the rx prod index on the rx std ring by at most
10972          * 8 for these chips to workaround hw errata.
10973          * 8 for these chips to work around hw errata.
10974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10977                 tp->rx_std_max_post = 8;
10978
10979         /* By default, disable wake-on-lan.  User can change this
10980          * using ETHTOOL_SWOL.
10981          */
10982         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10983
10984         return err;
10985 }
10986
10987 #ifdef CONFIG_SPARC64
10988 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10989 {
10990         struct net_device *dev = tp->dev;
10991         struct pci_dev *pdev = tp->pdev;
10992         struct pcidev_cookie *pcp = pdev->sysdata;
10993
10994         if (pcp != NULL) {
10995                 unsigned char *addr;
10996                 int len;
10997
10998                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10999                                         &len);
11000                 if (addr && len == 6) {
11001                         memcpy(dev->dev_addr, addr, 6);
11002                         memcpy(dev->perm_addr, dev->dev_addr, 6);
11003                         return 0;
11004                 }
11005         }
11006         return -ENODEV;
11007 }
11008
11009 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11010 {
11011         struct net_device *dev = tp->dev;
11012
11013         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11014         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11015         return 0;
11016 }
11017 #endif
11018
11019 static int __devinit tg3_get_device_address(struct tg3 *tp)
11020 {
11021         struct net_device *dev = tp->dev;
11022         u32 hi, lo, mac_offset;
11023         int addr_ok = 0;
11024
11025 #ifdef CONFIG_SPARC64
11026         if (!tg3_get_macaddr_sparc(tp))
11027                 return 0;
11028 #endif
11029
11030         mac_offset = 0x7c;
11031         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11032             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11033                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11034                         mac_offset = 0xcc;
11035                 if (tg3_nvram_lock(tp))
11036                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11037                 else
11038                         tg3_nvram_unlock(tp);
11039         }
11040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11041                 mac_offset = 0x10;
11042
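        /* The address is tried from three sources, in order: the SRAM mailbox
         * filled in by the bootcode (valid only when the upper 16 bits of the
         * HIGH word carry the 0x484b signature), NVRAM at mac_offset (note
         * the different byte packing), and finally the MAC_ADDR_0_HIGH/LOW
         * registers.  Whatever is found is still checked with
         * is_valid_ether_addr() before being accepted.
         */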
11043         /* First try to get it from MAC address mailbox. */
11044         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11045         if ((hi >> 16) == 0x484b) {
11046                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11047                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11048
11049                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11050                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11051                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11052                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11053                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11054
11055                 /* Some old bootcode may report a 0 MAC address in SRAM */
11056                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11057         }
11058         if (!addr_ok) {
11059                 /* Next, try NVRAM. */
11060                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11061                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11062                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11063                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11064                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11065                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11066                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11067                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11068                 }
11069                 /* Finally just fetch it out of the MAC control regs. */
11070                 else {
11071                         hi = tr32(MAC_ADDR_0_HIGH);
11072                         lo = tr32(MAC_ADDR_0_LOW);
11073
11074                         dev->dev_addr[5] = lo & 0xff;
11075                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11076                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11077                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11078                         dev->dev_addr[1] = hi & 0xff;
11079                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11080                 }
11081         }
11082
11083         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11084 #ifdef CONFIG_SPARC64
11085                 if (!tg3_get_default_macaddr_sparc(tp))
11086                         return 0;
11087 #endif
11088                 return -EINVAL;
11089         }
11090         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11091         return 0;
11092 }
11093
11094 #define BOUNDARY_SINGLE_CACHELINE       1
11095 #define BOUNDARY_MULTI_CACHELINE        2
11096
11097 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11098 {
11099         int cacheline_size;
11100         u8 byte;
11101         int goal;
11102
11103         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11104         if (byte == 0)
11105                 cacheline_size = 1024;
11106         else
11107                 cacheline_size = (int) byte * 4;
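        /* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words, hence
         * the multiply by 4 to get bytes.  A value of zero means the register
         * was never programmed, in which case 1024 bytes is assumed and the
         * boundary selection below falls through to its default case.
         */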
11108
11109         /* On 5703 and later chips, the boundary bits have no
11110          * effect.
11111          */
11112         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11113             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11114             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11115                 goto out;
11116
11117 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11118         goal = BOUNDARY_MULTI_CACHELINE;
11119 #else
11120 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11121         goal = BOUNDARY_SINGLE_CACHELINE;
11122 #else
11123         goal = 0;
11124 #endif
11125 #endif
11126
11127         if (!goal)
11128                 goto out;
11129
11130         /* PCI controllers on most RISC systems tend to disconnect
11131          * when a device tries to burst across a cache-line boundary.
11132          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11133          *
11134          * Unfortunately, for PCI-E there are only limited
11135          * write-side controls for this, and thus for reads
11136          * we will still get the disconnects.  We'll also waste
11137          * these PCI cycles for both read and write for chips
11138          * other than 5700 and 5701, which do not implement the
11139          * boundary bits.
11140          */
11141         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11142             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11143                 switch (cacheline_size) {
11144                 case 16:
11145                 case 32:
11146                 case 64:
11147                 case 128:
11148                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11149                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11150                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11151                         } else {
11152                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11153                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11154                         }
11155                         break;
11156
11157                 case 256:
11158                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11159                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11160                         break;
11161
11162                 default:
11163                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11164                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11165                         break;
11166                 };
11167         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11168                 switch (cacheline_size) {
11169                 case 16:
11170                 case 32:
11171                 case 64:
11172                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11173                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11174                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11175                                 break;
11176                         }
11177                         /* fallthrough */
11178                 case 128:
11179                 default:
11180                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11181                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11182                         break;
11183                 };
11184         } else {
11185                 switch (cacheline_size) {
11186                 case 16:
11187                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11188                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11189                                         DMA_RWCTRL_WRITE_BNDRY_16);
11190                                 break;
11191                         }
11192                         /* fallthrough */
11193                 case 32:
11194                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11195                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11196                                         DMA_RWCTRL_WRITE_BNDRY_32);
11197                                 break;
11198                         }
11199                         /* fallthrough */
11200                 case 64:
11201                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11202                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11203                                         DMA_RWCTRL_WRITE_BNDRY_64);
11204                                 break;
11205                         }
11206                         /* fallthrough */
11207                 case 128:
11208                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11209                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11210                                         DMA_RWCTRL_WRITE_BNDRY_128);
11211                                 break;
11212                         }
11213                         /* fallthrough */
11214                 case 256:
11215                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11216                                 DMA_RWCTRL_WRITE_BNDRY_256);
11217                         break;
11218                 case 512:
11219                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11220                                 DMA_RWCTRL_WRITE_BNDRY_512);
11221                         break;
11222                 case 1024:
11223                 default:
11224                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11225                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11226                         break;
11227                 };
11228         }
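        /* Worked example (hypothetical host): a 5701 on conventional PCI in a
         * sparc64 box with a 64-byte cache line ends up in the last switch
         * with goal == BOUNDARY_SINGLE_CACHELINE and cacheline_size == 64,
         * so DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64 is ORed
         * into val.
         */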
11229
11230 out:
11231         return val;
11232 }
11233
11234 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11235 {
11236         struct tg3_internal_buffer_desc test_desc;
11237         u32 sram_dma_descs;
11238         int i, ret;
11239
11240         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11241
11242         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11243         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11244         tw32(RDMAC_STATUS, 0);
11245         tw32(WDMAC_STATUS, 0);
11246
11247         tw32(BUFMGR_MODE, 0);
11248         tw32(FTQ_RESET, 0);
11249
11250         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11251         test_desc.addr_lo = buf_dma & 0xffffffff;
11252         test_desc.nic_mbuf = 0x00002100;
11253         test_desc.len = size;
11254
11255         /*
11256          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11257          * the *second* time the tg3 driver was getting loaded after an
11258          * initial scan.
11259          *
11260          * Broadcom tells me:
11261          *   ...the DMA engine is connected to the GRC block and a DMA
11262          *   reset may affect the GRC block in some unpredictable way...
11263          *   The behavior of resets to individual blocks has not been tested.
11264          *
11265          * Broadcom noted the GRC reset will also reset all sub-components.
11266          */
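        /* The remainder of this function enables the relevant DMA engine
         * (read DMA when sending to the device, write DMA otherwise), copies
         * the descriptor built above one 32-bit word at a time into NIC SRAM
         * through the PCI memory window registers, kicks the matching
         * high-priority FTQ with the descriptor's SRAM address, and then
         * polls the completion FIFO for that same address for at most
         * 40 * 100us.
         */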
11267         if (to_device) {
11268                 test_desc.cqid_sqid = (13 << 8) | 2;
11269
11270                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11271                 udelay(40);
11272         } else {
11273                 test_desc.cqid_sqid = (16 << 8) | 7;
11274
11275                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11276                 udelay(40);
11277         }
11278         test_desc.flags = 0x00000005;
11279
11280         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11281                 u32 val;
11282
11283                 val = *(((u32 *)&test_desc) + i);
11284                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11285                                        sram_dma_descs + (i * sizeof(u32)));
11286                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11287         }
11288         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11289
11290         if (to_device) {
11291                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11292         } else {
11293                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11294         }
11295
11296         ret = -ENODEV;
11297         for (i = 0; i < 40; i++) {
11298                 u32 val;
11299
11300                 if (to_device)
11301                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11302                 else
11303                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11304                 if ((val & 0xffff) == sram_dma_descs) {
11305                         ret = 0;
11306                         break;
11307                 }
11308
11309                 udelay(100);
11310         }
11311
11312         return ret;
11313 }
11314
11315 #define TEST_BUFFER_SIZE        0x2000
11316
11317 static int __devinit tg3_test_dma(struct tg3 *tp)
11318 {
11319         dma_addr_t buf_dma;
11320         u32 *buf, saved_dma_rwctrl;
11321         int ret;
11322
11323         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11324         if (!buf) {
11325                 ret = -ENOMEM;
11326                 goto out_nofree;
11327         }
11328
11329         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11330                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
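        /* Base value only: 0x7 and 0x6 are placed in the DMA_RWCTRL PCI
         * write/read command fields (presumably the PCI bus command codes the
         * DMA engines issue).  tg3_calc_dma_bndry() then ORs in the burst
         * boundary bits, and the chip-specific watermark bits are added
         * below.
         */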
11331
11332         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11333
11334         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11335                 /* DMA read watermark not used on PCIE */
11336                 tp->dma_rwctrl |= 0x00180000;
11337         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11338                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11339                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11340                         tp->dma_rwctrl |= 0x003f0000;
11341                 else
11342                         tp->dma_rwctrl |= 0x003f000f;
11343         } else {
11344                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11345                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11346                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11347
11348                         /* If the 5704 is behind the EPB bridge, we can
11349                          * do the less restrictive ONE_DMA workaround for
11350                          * better performance.
11351                          */
11352                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11353                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11354                                 tp->dma_rwctrl |= 0x8000;
11355                         else if (ccval == 0x6 || ccval == 0x7)
11356                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11357
11358                         /* Set bit 23 to enable PCIX hw bug fix */
11359                         tp->dma_rwctrl |= 0x009f0000;
11360                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11361                         /* 5780 always in PCIX mode */
11362                         tp->dma_rwctrl |= 0x00144000;
11363                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11364                         /* 5714 always in PCIX mode */
11365                         tp->dma_rwctrl |= 0x00148000;
11366                 } else {
11367                         tp->dma_rwctrl |= 0x001b000f;
11368                 }
11369         }
11370
11371         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11372             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11373                 tp->dma_rwctrl &= 0xfffffff0;
11374
11375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11376             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11377                 /* Remove this if it causes problems for some boards. */
11378                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11379
11380                 /* On 5700/5701 chips, we need to set this bit.
11381                  * Otherwise the chip will issue cacheline transactions
11382                  * to streamable DMA memory without all of the byte
11383                  * enables turned on.  This is an error on several
11384                  * RISC PCI controllers, in particular sparc64.
11385                  *
11386                  * On 5703/5704 chips, this bit has been reassigned
11387                  * a different meaning.  In particular, it is used
11388                  * on those chips to enable a PCI-X workaround.
11389                  */
11390                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11391         }
11392
11393         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11394
11395 #if 0
11396         /* Unneeded, already done by tg3_get_invariants.  */
11397         tg3_switch_clocks(tp);
11398 #endif
11399
11400         ret = 0;
11401         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11402             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11403                 goto out;
11404
11405         /* It is best to perform the DMA test with the maximum write burst size
11406          * to expose the 5700/5701 write DMA bug.
11407          */
11408         saved_dma_rwctrl = tp->dma_rwctrl;
11409         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11410         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11411
11412         while (1) {
11413                 u32 *p = buf, i;
11414
11415                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11416                         p[i] = i;
11417
11418                 /* Send the buffer to the chip. */
11419                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11420                 if (ret) {
11421                         printk(KERN_ERR "tg3_test_dma() writing the buffer to the device failed, err %d\n", ret);
11422                         break;
11423                 }
11424
11425 #if 0
11426                 /* validate data reached card RAM correctly. */
11427                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11428                         u32 val;
11429                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11430                         if (le32_to_cpu(val) != p[i]) {
11431                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11432                                 /* ret = -ENODEV here? */
11433                         }
11434                         p[i] = 0;
11435                 }
11436 #endif
11437                 /* Now read it back. */
11438                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11439                 if (ret) {
11440                         printk(KERN_ERR "tg3_test_dma() reading the buffer back failed, err %d\n", ret);
11441
11442                         break;
11443                 }
11444
11445                 /* Verify it. */
11446                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11447                         if (p[i] == i)
11448                                 continue;
11449
11450                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11451                             DMA_RWCTRL_WRITE_BNDRY_16) {
11452                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11453                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11454                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11455                                 break;
11456                         } else {
11457                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11458                                 ret = -ENODEV;
11459                                 goto out;
11460                         }
11461                 }
11462
11463                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11464                         /* Success. */
11465                         ret = 0;
11466                         break;
11467                 }
11468         }
11469         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11470             DMA_RWCTRL_WRITE_BNDRY_16) {
11471                 static struct pci_device_id dma_wait_state_chipsets[] = {
11472                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11473                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11474                         { },
11475                 };
11476
11477                 /* DMA test passed without adjusting the DMA boundary;
11478                  * now look for chipsets that are known to expose the
11479                  * DMA bug without failing the test.
11480                  */
11481                 if (pci_dev_present(dma_wait_state_chipsets)) {
11482                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11483                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11484                 }
11485                 else
11486                         /* Safe to use the calculated DMA boundary. */
11487                         tp->dma_rwctrl = saved_dma_rwctrl;
11488
11489                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11490         }
11491
11492 out:
11493         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11494 out_nofree:
11495         return ret;
11496 }
11497
11498 static void __devinit tg3_init_link_config(struct tg3 *tp)
11499 {
11500         tp->link_config.advertising =
11501                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11502                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11503                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11504                  ADVERTISED_Autoneg | ADVERTISED_MII);
11505         tp->link_config.speed = SPEED_INVALID;
11506         tp->link_config.duplex = DUPLEX_INVALID;
11507         tp->link_config.autoneg = AUTONEG_ENABLE;
11508         tp->link_config.active_speed = SPEED_INVALID;
11509         tp->link_config.active_duplex = DUPLEX_INVALID;
11510         tp->link_config.phy_is_low_power = 0;
11511         tp->link_config.orig_speed = SPEED_INVALID;
11512         tp->link_config.orig_duplex = DUPLEX_INVALID;
11513         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11514 }
11515
11516 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11517 {
11518         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11519                 tp->bufmgr_config.mbuf_read_dma_low_water =
11520                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11521                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11522                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11523                 tp->bufmgr_config.mbuf_high_water =
11524                         DEFAULT_MB_HIGH_WATER_5705;
11525                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11526                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11527                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11528                         tp->bufmgr_config.mbuf_high_water =
11529                                 DEFAULT_MB_HIGH_WATER_5906;
11530                 }
11531
11532                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11533                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11534                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11535                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11536                 tp->bufmgr_config.mbuf_high_water_jumbo =
11537                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11538         } else {
11539                 tp->bufmgr_config.mbuf_read_dma_low_water =
11540                         DEFAULT_MB_RDMA_LOW_WATER;
11541                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11542                         DEFAULT_MB_MACRX_LOW_WATER;
11543                 tp->bufmgr_config.mbuf_high_water =
11544                         DEFAULT_MB_HIGH_WATER;
11545
11546                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11547                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11548                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11549                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11550                 tp->bufmgr_config.mbuf_high_water_jumbo =
11551                         DEFAULT_MB_HIGH_WATER_JUMBO;
11552         }
11553
11554         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11555         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11556 }
11557
11558 static char * __devinit tg3_phy_string(struct tg3 *tp)
11559 {
11560         switch (tp->phy_id & PHY_ID_MASK) {
11561         case PHY_ID_BCM5400:    return "5400";
11562         case PHY_ID_BCM5401:    return "5401";
11563         case PHY_ID_BCM5411:    return "5411";
11564         case PHY_ID_BCM5701:    return "5701";
11565         case PHY_ID_BCM5703:    return "5703";
11566         case PHY_ID_BCM5704:    return "5704";
11567         case PHY_ID_BCM5705:    return "5705";
11568         case PHY_ID_BCM5750:    return "5750";
11569         case PHY_ID_BCM5752:    return "5752";
11570         case PHY_ID_BCM5714:    return "5714";
11571         case PHY_ID_BCM5780:    return "5780";
11572         case PHY_ID_BCM5755:    return "5755";
11573         case PHY_ID_BCM5787:    return "5787";
11574         case PHY_ID_BCM5756:    return "5722/5756";
11575         case PHY_ID_BCM5906:    return "5906";
11576         case PHY_ID_BCM8002:    return "8002/serdes";
11577         case 0:                 return "serdes";
11578         default:                return "unknown";
11579         };
11580 }
11581
11582 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11583 {
11584         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11585                 strcpy(str, "PCI Express");
11586                 return str;
11587         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11588                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11589
11590                 strcpy(str, "PCIX:");
11591
11592                 if ((clock_ctrl == 7) ||
11593                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11594                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11595                         strcat(str, "133MHz");
11596                 else if (clock_ctrl == 0)
11597                         strcat(str, "33MHz");
11598                 else if (clock_ctrl == 2)
11599                         strcat(str, "50MHz");
11600                 else if (clock_ctrl == 4)
11601                         strcat(str, "66MHz");
11602                 else if (clock_ctrl == 6)
11603                         strcat(str, "100MHz");
11604         } else {
11605                 strcpy(str, "PCI:");
11606                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11607                         strcat(str, "66MHz");
11608                 else
11609                         strcat(str, "33MHz");
11610         }
11611         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11612                 strcat(str, ":32-bit");
11613         else
11614                 strcat(str, ":64-bit");
11615         return str;
11616 }
11617
11618 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11619 {
11620         struct pci_dev *peer;
11621         unsigned int func, devnr = tp->pdev->devfn & ~7;
11622
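              /* Both ports of a dual-port device share a PCI slot; scan the
               * slot's other functions for the peer device.
               */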
11623         for (func = 0; func < 8; func++) {
11624                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11625                 if (peer && peer != tp->pdev)
11626                         break;
11627                 pci_dev_put(peer);
11628         }
11629         /* The 5704 can be configured in single-port mode; set peer to
11630          * tp->pdev in that case.
11631          */
11632         if (!peer) {
11633                 peer = tp->pdev;
11634                 return peer;
11635         }
11636
11637         /*
11638          * We don't need to keep the refcount elevated; there's no way
11639          * to remove one half of this device without removing the other
11640          */
11641         pci_dev_put(peer);
11642
11643         return peer;
11644 }
11645
11646 static void __devinit tg3_init_coal(struct tg3 *tp)
11647 {
11648         struct ethtool_coalesce *ec = &tp->coal;
11649
11650         memset(ec, 0, sizeof(*ec));
11651         ec->cmd = ETHTOOL_GCOALESCE;
11652         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11653         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11654         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11655         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11656         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11657         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11658         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11659         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11660         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11661
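              /* When host coalescing clears the ticks on RX/TX BD events, the
               * CLRTCKS variants of the tick defaults are used instead.
               */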
11662         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11663                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11664                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11665                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11666                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11667                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11668         }
11669
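              /* On 5705 and newer chips the driver leaves the per-IRQ
               * coalescing and statistics-block tick parameters unused, so
               * report them as zero.
               */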
11670         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11671                 ec->rx_coalesce_usecs_irq = 0;
11672                 ec->tx_coalesce_usecs_irq = 0;
11673                 ec->stats_block_coalesce_usecs = 0;
11674         }
11675 }
11676
11677 static int __devinit tg3_init_one(struct pci_dev *pdev,
11678                                   const struct pci_device_id *ent)
11679 {
11680         static int tg3_version_printed;
11681         unsigned long tg3reg_base, tg3reg_len;
11682         struct net_device *dev;
11683         struct tg3 *tp;
11684         int i, err, pm_cap;
11685         char str[40];
11686         u64 dma_mask, persist_dma_mask;
11687
11688         if (tg3_version_printed++ == 0)
11689                 printk(KERN_INFO "%s", version);
11690
11691         err = pci_enable_device(pdev);
11692         if (err) {
11693                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11694                        "aborting.\n");
11695                 return err;
11696         }
11697
11698         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11699                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11700                        "base address, aborting.\n");
11701                 err = -ENODEV;
11702                 goto err_out_disable_pdev;
11703         }
11704
11705         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11706         if (err) {
11707                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11708                        "aborting.\n");
11709                 goto err_out_disable_pdev;
11710         }
11711
11712         pci_set_master(pdev);
11713
11714         /* Find power-management capability. */
11715         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11716         if (pm_cap == 0) {
11717                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11718                        "aborting.\n");
11719                 err = -EIO;
11720                 goto err_out_free_res;
11721         }
11722
11723         tg3reg_base = pci_resource_start(pdev, 0);
11724         tg3reg_len = pci_resource_len(pdev, 0);
11725
11726         dev = alloc_etherdev(sizeof(*tp));
11727         if (!dev) {
11728                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11729                 err = -ENOMEM;
11730                 goto err_out_free_res;
11731         }
11732
11733         SET_MODULE_OWNER(dev);
11734         SET_NETDEV_DEV(dev, &pdev->dev);
11735
11736 #if TG3_VLAN_TAG_USED
11737         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11738         dev->vlan_rx_register = tg3_vlan_rx_register;
11739         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11740 #endif
11741
11742         tp = netdev_priv(dev);
11743         tp->pdev = pdev;
11744         tp->dev = dev;
11745         tp->pm_cap = pm_cap;
11746         tp->mac_mode = TG3_DEF_MAC_MODE;
11747         tp->rx_mode = TG3_DEF_RX_MODE;
11748         tp->tx_mode = TG3_DEF_TX_MODE;
11749         tp->mi_mode = MAC_MI_MODE_BASE;
11750         if (tg3_debug > 0)
11751                 tp->msg_enable = tg3_debug;
11752         else
11753                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11754
11755         /* The word/byte swap controls here control register access byte
11756          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11757          * setting below.
11758          */
11759         tp->misc_host_ctrl =
11760                 MISC_HOST_CTRL_MASK_PCI_INT |
11761                 MISC_HOST_CTRL_WORD_SWAP |
11762                 MISC_HOST_CTRL_INDIR_ACCESS |
11763                 MISC_HOST_CTRL_PCISTATE_RW;
11764
11765         /* The NONFRM (non-frame) byte/word swap controls take effect
11766          * on descriptor entries, i.e. anything that isn't packet data.
11767          *
11768          * The StrongARM chips on the board (one for tx, one for rx)
11769          * are running in big-endian mode.
11770          */
11771         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11772                         GRC_MODE_WSWAP_NONFRM_DATA);
11773 #ifdef __BIG_ENDIAN
11774         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11775 #endif
11776         spin_lock_init(&tp->lock);
11777         spin_lock_init(&tp->indirect_lock);
11778         INIT_WORK(&tp->reset_task, tg3_reset_task);
11779
11780         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11781         if (!tp->regs) {
11782                 printk(KERN_ERR PFX "Cannot map device registers, "
11783                        "aborting.\n");
11784                 err = -ENOMEM;
11785                 goto err_out_free_dev;
11786         }
11787
11788         tg3_init_link_config(tp);
11789
11790         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11791         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11792         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11793
11794         dev->open = tg3_open;
11795         dev->stop = tg3_close;
11796         dev->get_stats = tg3_get_stats;
11797         dev->set_multicast_list = tg3_set_rx_mode;
11798         dev->set_mac_address = tg3_set_mac_addr;
11799         dev->do_ioctl = tg3_ioctl;
11800         dev->tx_timeout = tg3_tx_timeout;
11801         dev->poll = tg3_poll;
11802         dev->ethtool_ops = &tg3_ethtool_ops;
11803         dev->weight = 64;
11804         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11805         dev->change_mtu = tg3_change_mtu;
11806         dev->irq = pdev->irq;
11807 #ifdef CONFIG_NET_POLL_CONTROLLER
11808         dev->poll_controller = tg3_poll_controller;
11809 #endif
11810
11811         err = tg3_get_invariants(tp);
11812         if (err) {
11813                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11814                        "aborting.\n");
11815                 goto err_out_iounmap;
11816         }
11817
11818         /* The EPB bridge inside 5714, 5715, and 5780 and any
11819          * device behind the EPB cannot support DMA addresses > 40-bit.
11820          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11821          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11822          * do DMA address check in tg3_start_xmit().
11823          */
11824         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11825                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11826         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11827                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11828 #ifdef CONFIG_HIGHMEM
11829                 dma_mask = DMA_64BIT_MASK;
11830 #endif
11831         } else
11832                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11833
11834         /* Configure DMA attributes. */
11835         if (dma_mask > DMA_32BIT_MASK) {
11836                 err = pci_set_dma_mask(pdev, dma_mask);
11837                 if (!err) {
11838                         dev->features |= NETIF_F_HIGHDMA;
11839                         err = pci_set_consistent_dma_mask(pdev,
11840                                                           persist_dma_mask);
11841                         if (err < 0) {
11842                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11843                                        "DMA for consistent allocations\n");
11844                                 goto err_out_iounmap;
11845                         }
11846                 }
11847         }
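              /* Fall back to 32-bit DMA if the larger mask could not be set. */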
11848         if (err || dma_mask == DMA_32BIT_MASK) {
11849                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11850                 if (err) {
11851                         printk(KERN_ERR PFX "No usable DMA configuration, "
11852                                "aborting.\n");
11853                         goto err_out_iounmap;
11854                 }
11855         }
11856
11857         tg3_init_bufmgr_config(tp);
11858
11859 #if TG3_TSO_SUPPORT != 0
11860         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11861                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11862         }
11863         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11864             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11865             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11866             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11867             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11868                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11869         } else {
11870                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11871         }
11872
11873         /* TSO is on by default on chips that support hardware TSO.
11874          * Firmware TSO on older chips gives lower performance, so it
11875          * is off by default, but can be enabled using ethtool.
11876          */
11877         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11878                 dev->features |= NETIF_F_TSO;
11879                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
11880                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
11881                         dev->features |= NETIF_F_TSO6;
11882         }
11883
11884 #endif
11885
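              /* A 5705 A1 without TSO on a slow PCI bus appears to be limited
               * to 64 pending RX descriptors; cap rx_pending accordingly.
               */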
11886         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11887             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11888             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11889                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11890                 tp->rx_pending = 63;
11891         }
11892
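              /* The 5704 and 5714 are dual-port devices; record the PCI device
               * of the other port (or our own device in single-port mode).
               */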
11893         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11894             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11895                 tp->pdev_peer = tg3_find_peer(tp);
11896
11897         err = tg3_get_device_address(tp);
11898         if (err) {
11899                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11900                        "aborting.\n");
11901                 goto err_out_iounmap;
11902         }
11903
11904         /*
11905          * Reset the chip in case the UNDI or EFI driver did not shut it
11906          * down cleanly; otherwise the DMA self test will enable WDMAC and
11907          * we'll see (spurious) pending DMA on the PCI bus at that point.
11908          */
11909         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11910             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11911                 pci_save_state(tp->pdev);
11912                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11913                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11914         }
11915
11916         err = tg3_test_dma(tp);
11917         if (err) {
11918                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11919                 goto err_out_iounmap;
11920         }
11921
11922         /* Tigon3 can only checksum IPv4... and some chips have buggy
11923          * checksumming.
11924          */
11925         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11926                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11927                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11928                         dev->features |= NETIF_F_HW_CSUM;
11929                 else
11930                         dev->features |= NETIF_F_IP_CSUM;
11931                 dev->features |= NETIF_F_SG;
11932                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11933         } else
11934                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11935
11936         /* flow control autonegotiation is default behavior */
11937         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11938
11939         tg3_init_coal(tp);
11940
11941         /* Now that we have fully set up the chip, save away a snapshot
11942          * of the PCI config space.  We need to restore this after
11943          * GRC_MISC_CFG core clock resets and some resume events.
11944          */
11945         pci_save_state(tp->pdev);
11946
11947         pci_set_drvdata(pdev, dev);
11948
11949         err = register_netdev(dev);
11950         if (err) {
11951                 printk(KERN_ERR PFX "Cannot register net device, "
11952                        "aborting.\n");
11953                 goto err_out_iounmap;
11954         }
11955
11956         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
11957                dev->name,
11958                tp->board_part_number,
11959                tp->pci_chip_rev_id,
11960                tg3_phy_string(tp),
11961                tg3_bus_string(tp, str),
11962                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
11963                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
11964                  "10/100/1000Base-T")));
11965
11966         for (i = 0; i < 6; i++)
11967                 printk("%2.2x%c", dev->dev_addr[i],
11968                        i == 5 ? '\n' : ':');
11969
11970         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11971                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11972                "TSOcap[%d] \n",
11973                dev->name,
11974                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11975                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11976                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11977                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11978                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11979                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11980                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11981         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11982                dev->name, tp->dma_rwctrl,
11983                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11984                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11985
11986         return 0;
11987
11988 err_out_iounmap:
11989         if (tp->regs) {
11990                 iounmap(tp->regs);
11991                 tp->regs = NULL;
11992         }
11993
11994 err_out_free_dev:
11995         free_netdev(dev);
11996
11997 err_out_free_res:
11998         pci_release_regions(pdev);
11999
12000 err_out_disable_pdev:
12001         pci_disable_device(pdev);
12002         pci_set_drvdata(pdev, NULL);
12003         return err;
12004 }
12005
12006 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12007 {
12008         struct net_device *dev = pci_get_drvdata(pdev);
12009
12010         if (dev) {
12011                 struct tg3 *tp = netdev_priv(dev);
12012
12013                 flush_scheduled_work();
12014                 unregister_netdev(dev);
12015                 if (tp->regs) {
12016                         iounmap(tp->regs);
12017                         tp->regs = NULL;
12018                 }
12019                 free_netdev(dev);
12020                 pci_release_regions(pdev);
12021                 pci_disable_device(pdev);
12022                 pci_set_drvdata(pdev, NULL);
12023         }
12024 }
12025
12026 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12027 {
12028         struct net_device *dev = pci_get_drvdata(pdev);
12029         struct tg3 *tp = netdev_priv(dev);
12030         int err;
12031
12032         if (!netif_running(dev))
12033                 return 0;
12034
12035         flush_scheduled_work();
12036         tg3_netif_stop(tp);
12037
12038         del_timer_sync(&tp->timer);
12039
12040         tg3_full_lock(tp, 1);
12041         tg3_disable_ints(tp);
12042         tg3_full_unlock(tp);
12043
12044         netif_device_detach(dev);
12045
12046         tg3_full_lock(tp, 0);
12047         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12048         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12049         tg3_full_unlock(tp);
12050
12051         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
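              /* If the transition to the low-power state fails, restart the
               * hardware and reattach the interface so the device stays usable.
               */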
12052         if (err) {
12053                 tg3_full_lock(tp, 0);
12054
12055                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12056                 if (tg3_restart_hw(tp, 1))
12057                         goto out;
12058
12059                 tp->timer.expires = jiffies + tp->timer_offset;
12060                 add_timer(&tp->timer);
12061
12062                 netif_device_attach(dev);
12063                 tg3_netif_start(tp);
12064
12065 out:
12066                 tg3_full_unlock(tp);
12067         }
12068
12069         return err;
12070 }
12071
12072 static int tg3_resume(struct pci_dev *pdev)
12073 {
12074         struct net_device *dev = pci_get_drvdata(pdev);
12075         struct tg3 *tp = netdev_priv(dev);
12076         int err;
12077
12078         if (!netif_running(dev))
12079                 return 0;
12080
12081         pci_restore_state(tp->pdev);
12082
12083         err = tg3_set_power_state(tp, PCI_D0);
12084         if (err)
12085                 return err;
12086
12087         netif_device_attach(dev);
12088
12089         tg3_full_lock(tp, 0);
12090
12091         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12092         err = tg3_restart_hw(tp, 1);
12093         if (err)
12094                 goto out;
12095
12096         tp->timer.expires = jiffies + tp->timer_offset;
12097         add_timer(&tp->timer);
12098
12099         tg3_netif_start(tp);
12100
12101 out:
12102         tg3_full_unlock(tp);
12103
12104         return err;
12105 }
12106
12107 static struct pci_driver tg3_driver = {
12108         .name           = DRV_MODULE_NAME,
12109         .id_table       = tg3_pci_tbl,
12110         .probe          = tg3_init_one,
12111         .remove         = __devexit_p(tg3_remove_one),
12112         .suspend        = tg3_suspend,
12113         .resume         = tg3_resume
12114 };
12115
12116 static int __init tg3_init(void)
12117 {
12118         return pci_register_driver(&tg3_driver);
12119 }
12120
12121 static void __exit tg3_cleanup(void)
12122 {
12123         pci_unregister_driver(&tg3_driver);
12124 }
12125
12126 module_init(tg3_init);
12127 module_exit(tg3_cleanup);