[TG3]: Add mailbox read method
drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.37"
70 #define DRV_MODULE_RELDATE      "August 25, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself;
105  * we really want to expose these constants to GCC so that modulo and
106  * similar operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { 0, }
245 };
246
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249 static struct {
250         const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
252         { "rx_octets" },
253         { "rx_fragments" },
254         { "rx_ucast_packets" },
255         { "rx_mcast_packets" },
256         { "rx_bcast_packets" },
257         { "rx_fcs_errors" },
258         { "rx_align_errors" },
259         { "rx_xon_pause_rcvd" },
260         { "rx_xoff_pause_rcvd" },
261         { "rx_mac_ctrl_rcvd" },
262         { "rx_xoff_entered" },
263         { "rx_frame_too_long_errors" },
264         { "rx_jabbers" },
265         { "rx_undersize_packets" },
266         { "rx_in_length_errors" },
267         { "rx_out_length_errors" },
268         { "rx_64_or_less_octet_packets" },
269         { "rx_65_to_127_octet_packets" },
270         { "rx_128_to_255_octet_packets" },
271         { "rx_256_to_511_octet_packets" },
272         { "rx_512_to_1023_octet_packets" },
273         { "rx_1024_to_1522_octet_packets" },
274         { "rx_1523_to_2047_octet_packets" },
275         { "rx_2048_to_4095_octet_packets" },
276         { "rx_4096_to_8191_octet_packets" },
277         { "rx_8192_to_9022_octet_packets" },
278
279         { "tx_octets" },
280         { "tx_collisions" },
281
282         { "tx_xon_sent" },
283         { "tx_xoff_sent" },
284         { "tx_flow_control" },
285         { "tx_mac_errors" },
286         { "tx_single_collisions" },
287         { "tx_mult_collisions" },
288         { "tx_deferred" },
289         { "tx_excessive_collisions" },
290         { "tx_late_collisions" },
291         { "tx_collide_2times" },
292         { "tx_collide_3times" },
293         { "tx_collide_4times" },
294         { "tx_collide_5times" },
295         { "tx_collide_6times" },
296         { "tx_collide_7times" },
297         { "tx_collide_8times" },
298         { "tx_collide_9times" },
299         { "tx_collide_10times" },
300         { "tx_collide_11times" },
301         { "tx_collide_12times" },
302         { "tx_collide_13times" },
303         { "tx_collide_14times" },
304         { "tx_collide_15times" },
305         { "tx_ucast_packets" },
306         { "tx_mcast_packets" },
307         { "tx_bcast_packets" },
308         { "tx_carrier_sense_errors" },
309         { "tx_discards" },
310         { "tx_errors" },
311
312         { "dma_writeq_full" },
313         { "dma_write_prioq_full" },
314         { "rxbds_empty" },
315         { "rx_discards" },
316         { "rx_errors" },
317         { "rx_threshold_hit" },
318
319         { "dma_readq_full" },
320         { "dma_read_prioq_full" },
321         { "tx_comp_queue_full" },
322
323         { "ring_set_send_prod_index" },
324         { "ring_status_update" },
325         { "nic_irqs" },
326         { "nic_avoided_irqs" },
327         { "nic_tx_threshold_hit" }
328 };
329
330 static struct {
331         const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333         { "nvram test     (online) " },
334         { "link test      (online) " },
335         { "register test  (offline)" },
336         { "memory test    (offline)" },
337         { "loopback test  (offline)" },
338         { "interrupt test (offline)" },
339 };
340
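/* Write a chip register indirectly through PCI config space
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), serialized by indirect_lock.
 */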
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         spin_lock_bh(&tp->indirect_lock);
344         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
345         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
346         spin_unlock_bh(&tp->indirect_lock);
347 }
348
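/* Write a register and read it back to flush the posted PCI write. */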
349 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
350 {
351         writel(val, tp->regs + off);
352         readl(tp->regs + off);
353 }
354
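/* Flushing register write: chips with the PCI-X target hardware bug
 * use the indirect config-space path, everything else writes and
 * reads back through MMIO.
 */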
355 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356 {
357         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
358                 spin_lock_bh(&tp->indirect_lock);
359                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361                 spin_unlock_bh(&tp->indirect_lock);
362         } else {
363                 void __iomem *dest = tp->regs + off;
364                 writel(val, dest);
365                 readl(dest);    /* always flush PCI write */
366         }
367 }
368
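/* Write a mailbox register, then read it back through the chip's
 * mailbox read method to flush the write.
 */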
369 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
370 {
371         tp->write32_mbox(tp, off, val);
372         tp->read32_mbox(tp, off);
373 }
374
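/* TX mailbox write: repeat the write on chips with the TXD mailbox
 * hardware bug, and read back where mailbox write reordering can occur.
 */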
375 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
376 {
377         void __iomem *mbox = tp->regs + off;
378         writel(val, mbox);
379         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
380                 writel(val, mbox);
381         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
382                 readl(mbox);
383 }
384
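/* Plain MMIO register accessors, no flush and no workarounds. */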
385 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
386 {
387         writel(val, tp->regs + off);
388 }
389
390 static u32 tg3_read32(struct tg3 *tp, u32 off)
391 {
392         return readl(tp->regs + off);
393 }
394
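/* Register and mailbox accessor shorthands.  Most of these dispatch
 * through the per-device method pointers so the right workaround is
 * used for each chip; tw32_f() always flushes.
 */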
395 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
396 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
397 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
398 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
399 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
400
401 #define tw32(reg,val)           tp->write32(tp, reg, val)
402 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
403 #define tr32(reg)               tp->read32(tp, reg)
404
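/* Access NIC on-chip memory through the PCI memory window registers,
 * returning the window base to zero afterwards.
 */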
405 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
406 {
407         spin_lock_bh(&tp->indirect_lock);
408         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
409         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
410
411         /* Always leave this as zero. */
412         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
413         spin_unlock_bh(&tp->indirect_lock);
414 }
415
416 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
417 {
418         spin_lock_bh(&tp->indirect_lock);
419         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
420         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
421
422         /* Always leave this as zero. */
423         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
424         spin_unlock_bh(&tp->indirect_lock);
425 }
426
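/* Mask the chip's PCI interrupt and write 1 to interrupt mailbox 0 so
 * the chip stops raising interrupts.
 */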
427 static void tg3_disable_ints(struct tg3 *tp)
428 {
429         tw32(TG3PCI_MISC_HOST_CTRL,
430              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
431         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
432 }
433
434 static inline void tg3_cond_int(struct tg3 *tp)
435 {
436         if (tp->hw_status->status & SD_STATUS_UPDATED)
437                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
438 }
439
440 static void tg3_enable_ints(struct tg3 *tp)
441 {
442         tp->irq_sync = 0;
443         wmb();
444
445         tw32(TG3PCI_MISC_HOST_CTRL,
446              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
447         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
448                        (tp->last_tag << 24));
449         tg3_cond_int(tp);
450 }
451
452 static inline unsigned int tg3_has_work(struct tg3 *tp)
453 {
454         struct tg3_hw_status *sblk = tp->hw_status;
455         unsigned int work_exists = 0;
456
457         /* check for phy events */
458         if (!(tp->tg3_flags &
459               (TG3_FLAG_USE_LINKCHG_REG |
460                TG3_FLAG_POLL_SERDES))) {
461                 if (sblk->status & SD_STATUS_LINK_CHG)
462                         work_exists = 1;
463         }
464         /* check for RX/TX work to do */
465         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
466             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
467                 work_exists = 1;
468
469         return work_exists;
470 }
471
472 /* tg3_restart_ints
473  *  Similar to tg3_enable_ints, but it accurately determines whether there
474  *  is new work pending and can return without flushing the PIO write
475  *  which re-enables interrupts.
476  */
477 static void tg3_restart_ints(struct tg3 *tp)
478 {
479         tw32(TG3PCI_MISC_HOST_CTRL,
480                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
481         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
482                      tp->last_tag << 24);
483         mmiowb();
484
485         /* When doing tagged status, this work check is unnecessary.
486          * The last_tag we write above tells the chip which piece of
487          * work we've completed.
488          */
489         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
490             tg3_has_work(tp))
491                 tw32(HOSTCC_MODE, tp->coalesce_mode |
492                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
493 }
494
495 static inline void tg3_netif_stop(struct tg3 *tp)
496 {
497         tp->dev->trans_start = jiffies; /* prevent tx timeout */
498         netif_poll_disable(tp->dev);
499         netif_tx_disable(tp->dev);
500 }
501
502 static inline void tg3_netif_start(struct tg3 *tp)
503 {
504         netif_wake_queue(tp->dev);
505         /* NOTE: unconditional netif_wake_queue is only appropriate
506          * so long as all callers are assured to have free tx slots
507          * (such as after tg3_init_hw)
508          */
509         netif_poll_enable(tp->dev);
510         tp->hw_status->status |= SD_STATUS_UPDATED;
511         tg3_enable_ints(tp);
512 }
513
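/* Switch the core clock selection via TG3PCI_CLOCK_CTRL in stages,
 * flushing each write and waiting 40 usec between steps.
 */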
514 static void tg3_switch_clocks(struct tg3 *tp)
515 {
516         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
517         u32 orig_clock_ctrl;
518
519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
520                 return;
521
522         orig_clock_ctrl = clock_ctrl;
523         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
524                        CLOCK_CTRL_CLKRUN_OENABLE |
525                        0x1f);
526         tp->pci_clock_ctrl = clock_ctrl;
527
528         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
529                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
530                         tw32_f(TG3PCI_CLOCK_CTRL,
531                                clock_ctrl | CLOCK_CTRL_625_CORE);
532                         udelay(40);
533                 }
534         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
535                 tw32_f(TG3PCI_CLOCK_CTRL,
536                      clock_ctrl |
537                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
538                 udelay(40);
539                 tw32_f(TG3PCI_CLOCK_CTRL,
540                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
541                 udelay(40);
542         }
543         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
544         udelay(40);
545 }
546
547 #define PHY_BUSY_LOOPS  5000
548
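/* Read a PHY register over the MI (MDIO) interface: temporarily turn
 * off auto-polling, issue the read and busy-wait for MI_COM_BUSY to clear.
 */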
549 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
550 {
551         u32 frame_val;
552         unsigned int loops;
553         int ret;
554
555         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
556                 tw32_f(MAC_MI_MODE,
557                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
558                 udelay(80);
559         }
560
561         *val = 0x0;
562
563         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
564                       MI_COM_PHY_ADDR_MASK);
565         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
566                       MI_COM_REG_ADDR_MASK);
567         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
568         
569         tw32_f(MAC_MI_COM, frame_val);
570
571         loops = PHY_BUSY_LOOPS;
572         while (loops != 0) {
573                 udelay(10);
574                 frame_val = tr32(MAC_MI_COM);
575
576                 if ((frame_val & MI_COM_BUSY) == 0) {
577                         udelay(5);
578                         frame_val = tr32(MAC_MI_COM);
579                         break;
580                 }
581                 loops -= 1;
582         }
583
584         ret = -EBUSY;
585         if (loops != 0) {
586                 *val = frame_val & MI_COM_DATA_MASK;
587                 ret = 0;
588         }
589
590         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
591                 tw32_f(MAC_MI_MODE, tp->mi_mode);
592                 udelay(80);
593         }
594
595         return ret;
596 }
597
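/* Write a PHY register over the MI interface, with the same
 * auto-poll handling and busy-wait as tg3_readphy().
 */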
598 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
599 {
600         u32 frame_val;
601         unsigned int loops;
602         int ret;
603
604         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
605                 tw32_f(MAC_MI_MODE,
606                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
607                 udelay(80);
608         }
609
610         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
611                       MI_COM_PHY_ADDR_MASK);
612         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
613                       MI_COM_REG_ADDR_MASK);
614         frame_val |= (val & MI_COM_DATA_MASK);
615         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
616         
617         tw32_f(MAC_MI_COM, frame_val);
618
619         loops = PHY_BUSY_LOOPS;
620         while (loops != 0) {
621                 udelay(10);
622                 frame_val = tr32(MAC_MI_COM);
623                 if ((frame_val & MI_COM_BUSY) == 0) {
624                         udelay(5);
625                         frame_val = tr32(MAC_MI_COM);
626                         break;
627                 }
628                 loops -= 1;
629         }
630
631         ret = -EBUSY;
632         if (loops != 0)
633                 ret = 0;
634
635         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
636                 tw32_f(MAC_MI_MODE, tp->mi_mode);
637                 udelay(80);
638         }
639
640         return ret;
641 }
642
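/* Enable the PHY's "ethernet@wirespeed" feature through the AUX_CTRL
 * register, unless TG3_FLG2_NO_ETH_WIRE_SPEED is set.
 */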
643 static void tg3_phy_set_wirespeed(struct tg3 *tp)
644 {
645         u32 val;
646
647         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
648                 return;
649
650         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
651             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
652                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
653                              (val | (1 << 15) | (1 << 4)));
654 }
655
656 static int tg3_bmcr_reset(struct tg3 *tp)
657 {
658         u32 phy_control;
659         int limit, err;
660
661         /* OK, reset it, and poll the BMCR_RESET bit until it
662          * clears or we time out.
663          */
664         phy_control = BMCR_RESET;
665         err = tg3_writephy(tp, MII_BMCR, phy_control);
666         if (err != 0)
667                 return -EBUSY;
668
669         limit = 5000;
670         while (limit--) {
671                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
672                 if (err != 0)
673                         return -EBUSY;
674
675                 if ((phy_control & BMCR_RESET) == 0) {
676                         udelay(40);
677                         break;
678                 }
679                 udelay(10);
680         }
681         if (limit < 0)
682                 return -EBUSY;
683
684         return 0;
685 }
686
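/* Poll PHY register 0x16 until the DSP macro-busy bit (0x1000) clears;
 * returns -EBUSY if it does not clear in time.
 */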
687 static int tg3_wait_macro_done(struct tg3 *tp)
688 {
689         int limit = 100;
690
691         while (limit--) {
692                 u32 tmp32;
693
694                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
695                         if ((tmp32 & 0x1000) == 0)
696                                 break;
697                 }
698         }
699         if (limit < 0)
700                 return -EBUSY;
701
702         return 0;
703 }
704
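/* Write a known test pattern to each of the four PHY DSP channels and
 * read it back to verify; returns -EBUSY if the pattern does not match
 * or the DSP does not respond.
 */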
705 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
706 {
707         static const u32 test_pat[4][6] = {
708         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
709         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
710         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
711         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
712         };
713         int chan;
714
715         for (chan = 0; chan < 4; chan++) {
716                 int i;
717
718                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
719                              (chan * 0x2000) | 0x0200);
720                 tg3_writephy(tp, 0x16, 0x0002);
721
722                 for (i = 0; i < 6; i++)
723                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
724                                      test_pat[chan][i]);
725
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp)) {
728                         *resetp = 1;
729                         return -EBUSY;
730                 }
731
732                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
733                              (chan * 0x2000) | 0x0200);
734                 tg3_writephy(tp, 0x16, 0x0082);
735                 if (tg3_wait_macro_done(tp)) {
736                         *resetp = 1;
737                         return -EBUSY;
738                 }
739
740                 tg3_writephy(tp, 0x16, 0x0802);
741                 if (tg3_wait_macro_done(tp)) {
742                         *resetp = 1;
743                         return -EBUSY;
744                 }
745
746                 for (i = 0; i < 6; i += 2) {
747                         u32 low, high;
748
749                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
750                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
751                             tg3_wait_macro_done(tp)) {
752                                 *resetp = 1;
753                                 return -EBUSY;
754                         }
755                         low &= 0x7fff;
756                         high &= 0x000f;
757                         if (low != test_pat[chan][i] ||
758                             high != test_pat[chan][i+1]) {
759                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
760                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
761                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
762
763                                 return -EBUSY;
764                         }
765                 }
766         }
767
768         return 0;
769 }
770
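/* Write zeroes to all four PHY DSP channels to clear the test pattern. */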
771 static int tg3_phy_reset_chanpat(struct tg3 *tp)
772 {
773         int chan;
774
775         for (chan = 0; chan < 4; chan++) {
776                 int i;
777
778                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
779                              (chan * 0x2000) | 0x0200);
780                 tg3_writephy(tp, 0x16, 0x0002);
781                 for (i = 0; i < 6; i++)
782                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
783                 tg3_writephy(tp, 0x16, 0x0202);
784                 if (tg3_wait_macro_done(tp))
785                         return -EBUSY;
786         }
787
788         return 0;
789 }
790
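/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000 Mbps full-duplex master mode, and retry the DSP test pattern
 * until it verifies, then restore the original settings.
 */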
791 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
792 {
793         u32 reg32, phy9_orig;
794         int retries, do_phy_reset, err;
795
796         retries = 10;
797         do_phy_reset = 1;
798         do {
799                 if (do_phy_reset) {
800                         err = tg3_bmcr_reset(tp);
801                         if (err)
802                                 return err;
803                         do_phy_reset = 0;
804                 }
805
806                 /* Disable transmitter and interrupt.  */
807                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
808                         continue;
809
810                 reg32 |= 0x3000;
811                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
812
813                 /* Set full-duplex, 1000 Mbps.  */
814                 tg3_writephy(tp, MII_BMCR,
815                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
816
817                 /* Set to master mode.  */
818                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
819                         continue;
820
821                 tg3_writephy(tp, MII_TG3_CTRL,
822                              (MII_TG3_CTRL_AS_MASTER |
823                               MII_TG3_CTRL_ENABLE_AS_MASTER));
824
825                 /* Enable SM_DSP_CLOCK and 6dB.  */
826                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
827
828                 /* Block the PHY control access.  */
829                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
830                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
831
832                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
833                 if (!err)
834                         break;
835         } while (--retries);
836
837         err = tg3_phy_reset_chanpat(tp);
838         if (err)
839                 return err;
840
841         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
842         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
843
844         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
845         tg3_writephy(tp, 0x16, 0x0000);
846
847         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
848             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
849                 /* Set Extended packet length bit for jumbo frames */
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
851         } else {
853                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
854         }
855
856         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
857
858         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
859                 reg32 &= ~0x3000;
860                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
861         } else if (!err)
862                 err = -EBUSY;
863
864         return err;
865 }
866
867 /* This will reset the tigon3 PHY unconditionally; callers
868  * decide when a reset is actually needed.
869  */
870 static int tg3_phy_reset(struct tg3 *tp)
871 {
872         u32 phy_status;
873         int err;
874
875         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
876         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
877         if (err != 0)
878                 return -EBUSY;
879
880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
881             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
882             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
883                 err = tg3_phy_reset_5703_4_5(tp);
884                 if (err)
885                         return err;
886                 goto out;
887         }
888
889         err = tg3_bmcr_reset(tp);
890         if (err)
891                 return err;
892
893 out:
894         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
895                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
896                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
897                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
898                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
899                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
900                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
901         }
902         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
903                 tg3_writephy(tp, 0x1c, 0x8d68);
904                 tg3_writephy(tp, 0x1c, 0x8d68);
905         }
906         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
907                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
908                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
909                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
910                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
911                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
912                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
913                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
914                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
915         }
916 /* Set Extended packet length bit (bit 14) on all chips that
917  * support jumbo frames. */
918         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
919                 /* Cannot do read-modify-write on 5401 */
920                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
921         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
922                 u32 phy_reg;
923
924                 /* Set bit 14 with read-modify-write to preserve other bits */
925                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
926                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
927                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
928         }
929
930 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
931  * jumbo frame transmission.
932  */
933         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
934                 u32 phy_reg;
935
936                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
937                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
938                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
939         }
940
941         tg3_phy_set_wirespeed(tp);
942         return 0;
943 }
944
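/* Drive the GRC local-control GPIOs that select auxiliary power,
 * coordinating with the peer port on 5704 dual-port boards and
 * taking WOL enablement into account.
 */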
945 static void tg3_frob_aux_power(struct tg3 *tp)
946 {
947         struct tg3 *tp_peer = tp;
948
949         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
950                 return;
951
952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
953                 tp_peer = pci_get_drvdata(tp->pdev_peer);
954                 if (!tp_peer)
955                         BUG();
956         }
957
958
959         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
960             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
961                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
962                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE0 |
965                               GRC_LCLCTRL_GPIO_OE1 |
966                               GRC_LCLCTRL_GPIO_OE2 |
967                               GRC_LCLCTRL_GPIO_OUTPUT0 |
968                               GRC_LCLCTRL_GPIO_OUTPUT1));
969                         udelay(100);
970                 } else {
971                         u32 no_gpio2;
972                         u32 grc_local_ctrl;
973
974                         if (tp_peer != tp &&
975                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
976                                 return;
977
978                         /* On 5753 and variants, GPIO2 cannot be used. */
979                         no_gpio2 = tp->nic_sram_data_cfg &
980                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
981
982                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
983                                          GRC_LCLCTRL_GPIO_OE1 |
984                                          GRC_LCLCTRL_GPIO_OE2 |
985                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
986                                          GRC_LCLCTRL_GPIO_OUTPUT2;
987                         if (no_gpio2) {
988                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
989                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
990                         }
991                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
992                                                 grc_local_ctrl);
993                         udelay(100);
994
995                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
996
997                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
998                                                 grc_local_ctrl);
999                         udelay(100);
1000
1001                         if (!no_gpio2) {
1002                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1003                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1004                                        grc_local_ctrl);
1005                                 udelay(100);
1006                         }
1007                 }
1008         } else {
1009                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1010                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1011                         if (tp_peer != tp &&
1012                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1013                                 return;
1014
1015                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1016                              (GRC_LCLCTRL_GPIO_OE1 |
1017                               GRC_LCLCTRL_GPIO_OUTPUT1));
1018                         udelay(100);
1019
1020                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1021                              (GRC_LCLCTRL_GPIO_OE1));
1022                         udelay(100);
1023
1024                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1025                              (GRC_LCLCTRL_GPIO_OE1 |
1026                               GRC_LCLCTRL_GPIO_OUTPUT1));
1027                         udelay(100);
1028                 }
1029         }
1030 }
1031
1032 static int tg3_setup_phy(struct tg3 *, int);
1033
1034 #define RESET_KIND_SHUTDOWN     0
1035 #define RESET_KIND_INIT         1
1036 #define RESET_KIND_SUSPEND      2
1037
1038 static void tg3_write_sig_post_reset(struct tg3 *, int);
1039 static int tg3_halt_cpu(struct tg3 *, u32);
1040
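/* Move the device to the requested PCI power state; on the way to a
 * low-power state this sets up WOL, clock gating and auxiliary power.
 */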
1041 static int tg3_set_power_state(struct tg3 *tp, int state)
1042 {
1043         u32 misc_host_ctrl;
1044         u16 power_control, power_caps;
1045         int pm = tp->pm_cap;
1046
1047         /* Make sure register accesses (indirect or otherwise)
1048          * will function correctly.
1049          */
1050         pci_write_config_dword(tp->pdev,
1051                                TG3PCI_MISC_HOST_CTRL,
1052                                tp->misc_host_ctrl);
1053
1054         pci_read_config_word(tp->pdev,
1055                              pm + PCI_PM_CTRL,
1056                              &power_control);
1057         power_control |= PCI_PM_CTRL_PME_STATUS;
1058         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1059         switch (state) {
1060         case 0:
1061                 power_control |= 0;
1062                 pci_write_config_word(tp->pdev,
1063                                       pm + PCI_PM_CTRL,
1064                                       power_control);
1065                 udelay(100);    /* Delay after power state change */
1066
1067                 /* Switch out of Vaux if it is not a LOM */
1068                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1069                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1070                         udelay(100);
1071                 }
1072
1073                 return 0;
1074
1075         case 1:
1076                 power_control |= 1;
1077                 break;
1078
1079         case 2:
1080                 power_control |= 2;
1081                 break;
1082
1083         case 3:
1084                 power_control |= 3;
1085                 break;
1086
1087         default:
1088                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1089                        "requested.\n",
1090                        tp->dev->name, state);
1091                 return -EINVAL;
1092         }
1093
1094         power_control |= PCI_PM_CTRL_PME_ENABLE;
1095
1096         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1097         tw32(TG3PCI_MISC_HOST_CTRL,
1098              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1099
1100         if (tp->link_config.phy_is_low_power == 0) {
1101                 tp->link_config.phy_is_low_power = 1;
1102                 tp->link_config.orig_speed = tp->link_config.speed;
1103                 tp->link_config.orig_duplex = tp->link_config.duplex;
1104                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1105         }
1106
1107         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1108                 tp->link_config.speed = SPEED_10;
1109                 tp->link_config.duplex = DUPLEX_HALF;
1110                 tp->link_config.autoneg = AUTONEG_ENABLE;
1111                 tg3_setup_phy(tp, 0);
1112         }
1113
1114         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1115
1116         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1117                 u32 mac_mode;
1118
1119                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1120                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1121                         udelay(40);
1122
1123                         mac_mode = MAC_MODE_PORT_MODE_MII;
1124
1125                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1126                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1127                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1128                 } else {
1129                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1130                 }
1131
1132                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1133                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1134
1135                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1136                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1137                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1138
1139                 tw32_f(MAC_MODE, mac_mode);
1140                 udelay(100);
1141
1142                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1143                 udelay(10);
1144         }
1145
1146         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1147             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1148              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1149                 u32 base_val;
1150
1151                 base_val = tp->pci_clock_ctrl;
1152                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1153                              CLOCK_CTRL_TXCLK_DISABLE);
1154
1155                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1156                      CLOCK_CTRL_ALTCLK |
1157                      CLOCK_CTRL_PWRDOWN_PLL133);
1158                 udelay(40);
1159         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1160                 /* do nothing */
1161         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1162                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1163                 u32 newbits1, newbits2;
1164
1165                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1166                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1167                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1168                                     CLOCK_CTRL_TXCLK_DISABLE |
1169                                     CLOCK_CTRL_ALTCLK);
1170                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1171                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1172                         newbits1 = CLOCK_CTRL_625_CORE;
1173                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1174                 } else {
1175                         newbits1 = CLOCK_CTRL_ALTCLK;
1176                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1177                 }
1178
1179                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1180                 udelay(40);
1181
1182                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1183                 udelay(40);
1184
1185                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1186                         u32 newbits3;
1187
1188                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1189                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1190                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1191                                             CLOCK_CTRL_TXCLK_DISABLE |
1192                                             CLOCK_CTRL_44MHZ_CORE);
1193                         } else {
1194                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1195                         }
1196
1197                         tw32_f(TG3PCI_CLOCK_CTRL,
1198                                          tp->pci_clock_ctrl | newbits3);
1199                         udelay(40);
1200                 }
1201         }
1202
1203         tg3_frob_aux_power(tp);
1204
1205         /* Workaround for unstable PLL clock */
1206         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1207             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1208                 u32 val = tr32(0x7d00);
1209
1210                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1211                 tw32(0x7d00, val);
1212                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1213                         tg3_halt_cpu(tp, RX_CPU_BASE);
1214         }
1215
1216         /* Finally, set the new power state. */
1217         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1218         udelay(100);    /* Delay after power state change */
1219
1220         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1221
1222         return 0;
1223 }
1224
1225 static void tg3_link_report(struct tg3 *tp)
1226 {
1227         if (!netif_carrier_ok(tp->dev)) {
1228                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1229         } else {
1230                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1231                        tp->dev->name,
1232                        (tp->link_config.active_speed == SPEED_1000 ?
1233                         1000 :
1234                         (tp->link_config.active_speed == SPEED_100 ?
1235                          100 : 10)),
1236                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1237                         "full" : "half"));
1238
1239                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1240                        "%s for RX.\n",
1241                        tp->dev->name,
1242                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1243                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1244         }
1245 }
1246
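/* Resolve TX/RX pause from the local and link-partner advertisements
 * (converting 1000BaseX pause bits to 1000BaseT form first) and program
 * MAC_RX_MODE / MAC_TX_MODE to match.
 */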
1247 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1248 {
1249         u32 new_tg3_flags = 0;
1250         u32 old_rx_mode = tp->rx_mode;
1251         u32 old_tx_mode = tp->tx_mode;
1252
1253         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1254
1255                 /* Convert 1000BaseX flow control bits to 1000BaseT
1256                  * bits before resolving flow control.
1257                  */
1258                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1259                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1260                                        ADVERTISE_PAUSE_ASYM);
1261                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1262
1263                         if (local_adv & ADVERTISE_1000XPAUSE)
1264                                 local_adv |= ADVERTISE_PAUSE_CAP;
1265                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1266                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1267                         if (remote_adv & LPA_1000XPAUSE)
1268                                 remote_adv |= LPA_PAUSE_CAP;
1269                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1270                                 remote_adv |= LPA_PAUSE_ASYM;
1271                 }
1272
1273                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1274                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1275                                 if (remote_adv & LPA_PAUSE_CAP)
1276                                         new_tg3_flags |=
1277                                                 (TG3_FLAG_RX_PAUSE |
1278                                                 TG3_FLAG_TX_PAUSE);
1279                                 else if (remote_adv & LPA_PAUSE_ASYM)
1280                                         new_tg3_flags |=
1281                                                 (TG3_FLAG_RX_PAUSE);
1282                         } else {
1283                                 if (remote_adv & LPA_PAUSE_CAP)
1284                                         new_tg3_flags |=
1285                                                 (TG3_FLAG_RX_PAUSE |
1286                                                 TG3_FLAG_TX_PAUSE);
1287                         }
1288                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1289                         if ((remote_adv & LPA_PAUSE_CAP) &&
1290                         (remote_adv & LPA_PAUSE_ASYM))
1291                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1292                 }
1293
1294                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1295                 tp->tg3_flags |= new_tg3_flags;
1296         } else {
1297                 new_tg3_flags = tp->tg3_flags;
1298         }
1299
1300         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1301                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1302         else
1303                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1304
1305         if (old_rx_mode != tp->rx_mode) {
1306                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1307         }
1308         
1309         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1310                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1311         else
1312                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1313
1314         if (old_tx_mode != tp->tx_mode) {
1315                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1316         }
1317 }
1318
1319 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1320 {
1321         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1322         case MII_TG3_AUX_STAT_10HALF:
1323                 *speed = SPEED_10;
1324                 *duplex = DUPLEX_HALF;
1325                 break;
1326
1327         case MII_TG3_AUX_STAT_10FULL:
1328                 *speed = SPEED_10;
1329                 *duplex = DUPLEX_FULL;
1330                 break;
1331
1332         case MII_TG3_AUX_STAT_100HALF:
1333                 *speed = SPEED_100;
1334                 *duplex = DUPLEX_HALF;
1335                 break;
1336
1337         case MII_TG3_AUX_STAT_100FULL:
1338                 *speed = SPEED_100;
1339                 *duplex = DUPLEX_FULL;
1340                 break;
1341
1342         case MII_TG3_AUX_STAT_1000HALF:
1343                 *speed = SPEED_1000;
1344                 *duplex = DUPLEX_HALF;
1345                 break;
1346
1347         case MII_TG3_AUX_STAT_1000FULL:
1348                 *speed = SPEED_1000;
1349                 *duplex = DUPLEX_FULL;
1350                 break;
1351
1352         default:
1353                 *speed = SPEED_INVALID;
1354                 *duplex = DUPLEX_INVALID;
1355                 break;
1356         }
1357 }
1358
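/* Program the copper PHY advertisement and control registers from
 * link_config: low-power advertising, full autonegotiation, or a
 * forced speed/duplex.
 */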
1359 static void tg3_phy_copper_begin(struct tg3 *tp)
1360 {
1361         u32 new_adv;
1362         int i;
1363
1364         if (tp->link_config.phy_is_low_power) {
1365                 /* Entering low power mode.  Disable gigabit and
1366                  * 100baseT advertisements.
1367                  */
1368                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1369
1370                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1371                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1372                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1373                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1374
1375                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1376         } else if (tp->link_config.speed == SPEED_INVALID) {
1377                 tp->link_config.advertising =
1378                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1379                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1380                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1381                          ADVERTISED_Autoneg | ADVERTISED_MII);
1382
1383                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1384                         tp->link_config.advertising &=
1385                                 ~(ADVERTISED_1000baseT_Half |
1386                                   ADVERTISED_1000baseT_Full);
1387
1388                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1389                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1390                         new_adv |= ADVERTISE_10HALF;
1391                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1392                         new_adv |= ADVERTISE_10FULL;
1393                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1394                         new_adv |= ADVERTISE_100HALF;
1395                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1396                         new_adv |= ADVERTISE_100FULL;
1397                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1398
1399                 if (tp->link_config.advertising &
1400                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1401                         new_adv = 0;
1402                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1403                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1404                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1405                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1406                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1407                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1408                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1409                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1410                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1411                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1412                 } else {
1413                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1414                 }
1415         } else {
1416                 /* Asking for a specific link mode. */
1417                 if (tp->link_config.speed == SPEED_1000) {
1418                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1419                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1420
1421                         if (tp->link_config.duplex == DUPLEX_FULL)
1422                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1423                         else
1424                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1425                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1426                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1427                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1428                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1429                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1430                 } else {
1431                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1432
1433                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1434                         if (tp->link_config.speed == SPEED_100) {
1435                                 if (tp->link_config.duplex == DUPLEX_FULL)
1436                                         new_adv |= ADVERTISE_100FULL;
1437                                 else
1438                                         new_adv |= ADVERTISE_100HALF;
1439                         } else {
1440                                 if (tp->link_config.duplex == DUPLEX_FULL)
1441                                         new_adv |= ADVERTISE_10FULL;
1442                                 else
1443                                         new_adv |= ADVERTISE_10HALF;
1444                         }
1445                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1446                 }
1447         }
1448
1449         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1450             tp->link_config.speed != SPEED_INVALID) {
1451                 u32 bmcr, orig_bmcr;
1452
1453                 tp->link_config.active_speed = tp->link_config.speed;
1454                 tp->link_config.active_duplex = tp->link_config.duplex;
1455
1456                 bmcr = 0;
1457                 switch (tp->link_config.speed) {
1458                 default:
1459                 case SPEED_10:
1460                         break;
1461
1462                 case SPEED_100:
1463                         bmcr |= BMCR_SPEED100;
1464                         break;
1465
1466                 case SPEED_1000:
1467                         bmcr |= TG3_BMCR_SPEED1000;
1468                         break;
1469                 }
1470
1471                 if (tp->link_config.duplex == DUPLEX_FULL)
1472                         bmcr |= BMCR_FULLDPLX;
1473
1474                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1475                     (bmcr != orig_bmcr)) {
1476                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1477                         for (i = 0; i < 1500; i++) {
1478                                 u32 tmp;
1479
1480                                 udelay(10);
1481                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1482                                     tg3_readphy(tp, MII_BMSR, &tmp))
1483                                         continue;
1484                                 if (!(tmp & BMSR_LSTATUS)) {
1485                                         udelay(40);
1486                                         break;
1487                                 }
1488                         }
1489                         tg3_writephy(tp, MII_BMCR, bmcr);
1490                         udelay(40);
1491                 }
1492         } else {
1493                 tg3_writephy(tp, MII_BMCR,
1494                              BMCR_ANENABLE | BMCR_ANRESTART);
1495         }
1496 }
1497
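/* Apply the BCM5401 PHY DSP setup sequence (tap power management off,
 * extended packet length enabled).  Returns 0 on success, nonzero on a
 * PHY write error.
 */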
1498 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1499 {
1500         int err;
1501
1502         /* Turn off tap power management. */
1503         /* Set Extended packet length bit */
1504         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1505
1506         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1507         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1508
1509         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1510         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1511
1512         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1513         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1514
1515         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1516         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1517
1518         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1519         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1520
1521         udelay(40);
1522
1523         return err;
1524 }
1525
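/* Return 1 if the PHY is currently advertising every 10/100 mode, plus
 * both 1000 modes when the device is not restricted to 10/100.
 */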
1526 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1527 {
1528         u32 adv_reg, all_mask;
1529
1530         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1531                 return 0;
1532
1533         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1534                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1535         if ((adv_reg & all_mask) != all_mask)
1536                 return 0;
1537         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1538                 u32 tg3_ctrl;
1539
1540                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1541                         return 0;
1542
1543                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1544                             MII_TG3_CTRL_ADV_1000_FULL);
1545                 if ((tg3_ctrl & all_mask) != all_mask)
1546                         return 0;
1547         }
1548         return 1;
1549 }
1550
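/* Bring up the link on a copper PHY: optionally reset the PHY, apply
 * chip-specific workarounds, poll for link, resolve the negotiated
 * speed/duplex and flow control, and program MAC_MODE to match.
 */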
1551 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1552 {
1553         int current_link_up;
1554         u32 bmsr, dummy;
1555         u16 current_speed;
1556         u8 current_duplex;
1557         int i, err;
1558
1559         tw32(MAC_EVENT, 0);
1560
1561         tw32_f(MAC_STATUS,
1562              (MAC_STATUS_SYNC_CHANGED |
1563               MAC_STATUS_CFG_CHANGED |
1564               MAC_STATUS_MI_COMPLETION |
1565               MAC_STATUS_LNKSTATE_CHANGED));
1566         udelay(40);
1567
1568         tp->mi_mode = MAC_MI_MODE_BASE;
1569         tw32_f(MAC_MI_MODE, tp->mi_mode);
1570         udelay(80);
1571
1572         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1573
1574         /* Some third-party PHYs need to be reset on link going
1575          * down.
1576          */
1577         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1578              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1579              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1580             netif_carrier_ok(tp->dev)) {
1581                 tg3_readphy(tp, MII_BMSR, &bmsr);
1582                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1583                     !(bmsr & BMSR_LSTATUS))
1584                         force_reset = 1;
1585         }
1586         if (force_reset)
1587                 tg3_phy_reset(tp);
1588
1589         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1590                 tg3_readphy(tp, MII_BMSR, &bmsr);
1591                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1592                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1593                         bmsr = 0;
1594
1595                 if (!(bmsr & BMSR_LSTATUS)) {
1596                         err = tg3_init_5401phy_dsp(tp);
1597                         if (err)
1598                                 return err;
1599
1600                         tg3_readphy(tp, MII_BMSR, &bmsr);
1601                         for (i = 0; i < 1000; i++) {
1602                                 udelay(10);
1603                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1604                                     (bmsr & BMSR_LSTATUS)) {
1605                                         udelay(40);
1606                                         break;
1607                                 }
1608                         }
1609
1610                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1611                             !(bmsr & BMSR_LSTATUS) &&
1612                             tp->link_config.active_speed == SPEED_1000) {
1613                                 err = tg3_phy_reset(tp);
1614                                 if (!err)
1615                                         err = tg3_init_5401phy_dsp(tp);
1616                                 if (err)
1617                                         return err;
1618                         }
1619                 }
1620         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1621                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1622                 /* 5701 {A0,B0} CRC bug workaround */
1623                 tg3_writephy(tp, 0x15, 0x0a75);
1624                 tg3_writephy(tp, 0x1c, 0x8c68);
1625                 tg3_writephy(tp, 0x1c, 0x8d68);
1626                 tg3_writephy(tp, 0x1c, 0x8c68);
1627         }
1628
1629         /* Clear pending interrupts... */
1630         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1631         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1632
1633         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1634                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1635         else
1636                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1637
1638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1639             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1640                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1641                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1642                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1643                 else
1644                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1645         }
1646
1647         current_link_up = 0;
1648         current_speed = SPEED_INVALID;
1649         current_duplex = DUPLEX_INVALID;
1650
1651         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1652                 u32 val;
1653
1654                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1655                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1656                 if (!(val & (1 << 10))) {
1657                         val |= (1 << 10);
1658                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1659                         goto relink;
1660                 }
1661         }
1662
1663         bmsr = 0;
1664         for (i = 0; i < 100; i++) {
1665                 tg3_readphy(tp, MII_BMSR, &bmsr);
1666                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1667                     (bmsr & BMSR_LSTATUS))
1668                         break;
1669                 udelay(40);
1670         }
1671
1672         if (bmsr & BMSR_LSTATUS) {
1673                 u32 aux_stat, bmcr;
1674
1675                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1676                 for (i = 0; i < 2000; i++) {
1677                         udelay(10);
1678                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1679                             aux_stat)
1680                                 break;
1681                 }
1682
1683                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1684                                              &current_speed,
1685                                              &current_duplex);
1686
1687                 bmcr = 0;
1688                 for (i = 0; i < 200; i++) {
1689                         tg3_readphy(tp, MII_BMCR, &bmcr);
1690                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1691                                 continue;
1692                         if (bmcr && bmcr != 0x7fff)
1693                                 break;
1694                         udelay(10);
1695                 }
1696
1697                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1698                         if (bmcr & BMCR_ANENABLE) {
1699                                 current_link_up = 1;
1700
1701                                 /* Force autoneg restart if we are exiting
1702                                  * low power mode.
1703                                  */
1704                                 if (!tg3_copper_is_advertising_all(tp))
1705                                         current_link_up = 0;
1706                         } else {
1707                                 current_link_up = 0;
1708                         }
1709                 } else {
1710                         if (!(bmcr & BMCR_ANENABLE) &&
1711                             tp->link_config.speed == current_speed &&
1712                             tp->link_config.duplex == current_duplex) {
1713                                 current_link_up = 1;
1714                         } else {
1715                                 current_link_up = 0;
1716                         }
1717                 }
1718
1719                 tp->link_config.active_speed = current_speed;
1720                 tp->link_config.active_duplex = current_duplex;
1721         }
1722
1723         if (current_link_up == 1 &&
1724             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1725             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1726                 u32 local_adv, remote_adv;
1727
1728                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1729                         local_adv = 0;
1730                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1731
1732                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1733                         remote_adv = 0;
1734
1735                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1736
1737                 /* If we are not advertising full pause capability,
1738                  * something is wrong.  Bring the link down and reconfigure.
1739                  */
1740                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1741                         current_link_up = 0;
1742                 } else {
1743                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1744                 }
1745         }
1746 relink:
1747         if (current_link_up == 0) {
1748                 u32 tmp;
1749
1750                 tg3_phy_copper_begin(tp);
1751
1752                 tg3_readphy(tp, MII_BMSR, &tmp);
1753                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1754                     (tmp & BMSR_LSTATUS))
1755                         current_link_up = 1;
1756         }
1757
1758         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1759         if (current_link_up == 1) {
1760                 if (tp->link_config.active_speed == SPEED_100 ||
1761                     tp->link_config.active_speed == SPEED_10)
1762                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1763                 else
1764                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1765         } else
1766                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767
1768         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1769         if (tp->link_config.active_duplex == DUPLEX_HALF)
1770                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1771
1772         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1773         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1774                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1775                     (current_link_up == 1 &&
1776                      tp->link_config.active_speed == SPEED_10))
1777                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1778         } else {
1779                 if (current_link_up == 1)
1780                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1781         }
1782
1783         /* ??? Without this setting Netgear GA302T PHY does not
1784          * ??? send/receive packets...
1785          */
1786         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1787             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1788                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1789                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1790                 udelay(80);
1791         }
1792
1793         tw32_f(MAC_MODE, tp->mac_mode);
1794         udelay(40);
1795
1796         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1797                 /* Polled via timer. */
1798                 tw32_f(MAC_EVENT, 0);
1799         } else {
1800                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1801         }
1802         udelay(40);
1803
1804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1805             current_link_up == 1 &&
1806             tp->link_config.active_speed == SPEED_1000 &&
1807             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1808              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1809                 udelay(120);
1810                 tw32_f(MAC_STATUS,
1811                      (MAC_STATUS_SYNC_CHANGED |
1812                       MAC_STATUS_CFG_CHANGED));
1813                 udelay(40);
1814                 tg3_write_mem(tp,
1815                               NIC_SRAM_FIRMWARE_MBOX,
1816                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1817         }
1818
1819         if (current_link_up != netif_carrier_ok(tp->dev)) {
1820                 if (current_link_up)
1821                         netif_carrier_on(tp->dev);
1822                 else
1823                         netif_carrier_off(tp->dev);
1824                 tg3_link_report(tp);
1825         }
1826
1827         return 0;
1828 }
1829
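/* State for the software 1000BASE-X autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine() below.
 */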
1830 struct tg3_fiber_aneginfo {
1831         int state;
1832 #define ANEG_STATE_UNKNOWN              0
1833 #define ANEG_STATE_AN_ENABLE            1
1834 #define ANEG_STATE_RESTART_INIT         2
1835 #define ANEG_STATE_RESTART              3
1836 #define ANEG_STATE_DISABLE_LINK_OK      4
1837 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1838 #define ANEG_STATE_ABILITY_DETECT       6
1839 #define ANEG_STATE_ACK_DETECT_INIT      7
1840 #define ANEG_STATE_ACK_DETECT           8
1841 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1842 #define ANEG_STATE_COMPLETE_ACK         10
1843 #define ANEG_STATE_IDLE_DETECT_INIT     11
1844 #define ANEG_STATE_IDLE_DETECT          12
1845 #define ANEG_STATE_LINK_OK              13
1846 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1847 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1848
1849         u32 flags;
1850 #define MR_AN_ENABLE            0x00000001
1851 #define MR_RESTART_AN           0x00000002
1852 #define MR_AN_COMPLETE          0x00000004
1853 #define MR_PAGE_RX              0x00000008
1854 #define MR_NP_LOADED            0x00000010
1855 #define MR_TOGGLE_TX            0x00000020
1856 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1857 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1858 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1859 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1860 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1861 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1862 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1863 #define MR_TOGGLE_RX            0x00002000
1864 #define MR_NP_RX                0x00004000
1865
1866 #define MR_LINK_OK              0x80000000
1867
1868         unsigned long link_time, cur_time;
1869
1870         u32 ability_match_cfg;
1871         int ability_match_count;
1872
1873         char ability_match, idle_match, ack_match;
1874
1875         u32 txconfig, rxconfig;
1876 #define ANEG_CFG_NP             0x00000080
1877 #define ANEG_CFG_ACK            0x00000040
1878 #define ANEG_CFG_RF2            0x00000020
1879 #define ANEG_CFG_RF1            0x00000010
1880 #define ANEG_CFG_PS2            0x00000001
1881 #define ANEG_CFG_PS1            0x00008000
1882 #define ANEG_CFG_HD             0x00004000
1883 #define ANEG_CFG_FD             0x00002000
1884 #define ANEG_CFG_INVAL          0x00001f06
1885
1886 };
1887 #define ANEG_OK         0
1888 #define ANEG_DONE       1
1889 #define ANEG_TIMER_ENAB 2
1890 #define ANEG_FAILED     -1
1891
1892 #define ANEG_STATE_SETTLE_TIME  10000
1893
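/* Advance the software fiber autonegotiation state machine by one step
 * based on the current MAC_STATUS/MAC_RX_AUTO_NEG contents.  Returns
 * ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
 */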
1894 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1895                                    struct tg3_fiber_aneginfo *ap)
1896 {
1897         unsigned long delta;
1898         u32 rx_cfg_reg;
1899         int ret;
1900
1901         if (ap->state == ANEG_STATE_UNKNOWN) {
1902                 ap->rxconfig = 0;
1903                 ap->link_time = 0;
1904                 ap->cur_time = 0;
1905                 ap->ability_match_cfg = 0;
1906                 ap->ability_match_count = 0;
1907                 ap->ability_match = 0;
1908                 ap->idle_match = 0;
1909                 ap->ack_match = 0;
1910         }
1911         ap->cur_time++;
1912
1913         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1914                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1915
1916                 if (rx_cfg_reg != ap->ability_match_cfg) {
1917                         ap->ability_match_cfg = rx_cfg_reg;
1918                         ap->ability_match = 0;
1919                         ap->ability_match_count = 0;
1920                 } else {
1921                         if (++ap->ability_match_count > 1) {
1922                                 ap->ability_match = 1;
1923                                 ap->ability_match_cfg = rx_cfg_reg;
1924                         }
1925                 }
1926                 if (rx_cfg_reg & ANEG_CFG_ACK)
1927                         ap->ack_match = 1;
1928                 else
1929                         ap->ack_match = 0;
1930
1931                 ap->idle_match = 0;
1932         } else {
1933                 ap->idle_match = 1;
1934                 ap->ability_match_cfg = 0;
1935                 ap->ability_match_count = 0;
1936                 ap->ability_match = 0;
1937                 ap->ack_match = 0;
1938
1939                 rx_cfg_reg = 0;
1940         }
1941
1942         ap->rxconfig = rx_cfg_reg;
1943         ret = ANEG_OK;
1944
1945         switch (ap->state) {
1946         case ANEG_STATE_UNKNOWN:
1947                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1948                         ap->state = ANEG_STATE_AN_ENABLE;
1949
1950                 /* fallthru */
1951         case ANEG_STATE_AN_ENABLE:
1952                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1953                 if (ap->flags & MR_AN_ENABLE) {
1954                         ap->link_time = 0;
1955                         ap->cur_time = 0;
1956                         ap->ability_match_cfg = 0;
1957                         ap->ability_match_count = 0;
1958                         ap->ability_match = 0;
1959                         ap->idle_match = 0;
1960                         ap->ack_match = 0;
1961
1962                         ap->state = ANEG_STATE_RESTART_INIT;
1963                 } else {
1964                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1965                 }
1966                 break;
1967
1968         case ANEG_STATE_RESTART_INIT:
1969                 ap->link_time = ap->cur_time;
1970                 ap->flags &= ~(MR_NP_LOADED);
1971                 ap->txconfig = 0;
1972                 tw32(MAC_TX_AUTO_NEG, 0);
1973                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1974                 tw32_f(MAC_MODE, tp->mac_mode);
1975                 udelay(40);
1976
1977                 ret = ANEG_TIMER_ENAB;
1978                 ap->state = ANEG_STATE_RESTART;
1979
1980                 /* fallthru */
1981         case ANEG_STATE_RESTART:
1982                 delta = ap->cur_time - ap->link_time;
1983                 if (delta > ANEG_STATE_SETTLE_TIME) {
1984                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1985                 } else {
1986                         ret = ANEG_TIMER_ENAB;
1987                 }
1988                 break;
1989
1990         case ANEG_STATE_DISABLE_LINK_OK:
1991                 ret = ANEG_DONE;
1992                 break;
1993
1994         case ANEG_STATE_ABILITY_DETECT_INIT:
1995                 ap->flags &= ~(MR_TOGGLE_TX);
1996                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1997                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1998                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1999                 tw32_f(MAC_MODE, tp->mac_mode);
2000                 udelay(40);
2001
2002                 ap->state = ANEG_STATE_ABILITY_DETECT;
2003                 break;
2004
2005         case ANEG_STATE_ABILITY_DETECT:
2006                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2007                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2008                 }
2009                 break;
2010
2011         case ANEG_STATE_ACK_DETECT_INIT:
2012                 ap->txconfig |= ANEG_CFG_ACK;
2013                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2014                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2015                 tw32_f(MAC_MODE, tp->mac_mode);
2016                 udelay(40);
2017
2018                 ap->state = ANEG_STATE_ACK_DETECT;
2019
2020                 /* fallthru */
2021         case ANEG_STATE_ACK_DETECT:
2022                 if (ap->ack_match != 0) {
2023                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2024                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2025                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2026                         } else {
2027                                 ap->state = ANEG_STATE_AN_ENABLE;
2028                         }
2029                 } else if (ap->ability_match != 0 &&
2030                            ap->rxconfig == 0) {
2031                         ap->state = ANEG_STATE_AN_ENABLE;
2032                 }
2033                 break;
2034
2035         case ANEG_STATE_COMPLETE_ACK_INIT:
2036                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2037                         ret = ANEG_FAILED;
2038                         break;
2039                 }
2040                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2041                                MR_LP_ADV_HALF_DUPLEX |
2042                                MR_LP_ADV_SYM_PAUSE |
2043                                MR_LP_ADV_ASYM_PAUSE |
2044                                MR_LP_ADV_REMOTE_FAULT1 |
2045                                MR_LP_ADV_REMOTE_FAULT2 |
2046                                MR_LP_ADV_NEXT_PAGE |
2047                                MR_TOGGLE_RX |
2048                                MR_NP_RX);
2049                 if (ap->rxconfig & ANEG_CFG_FD)
2050                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2051                 if (ap->rxconfig & ANEG_CFG_HD)
2052                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2053                 if (ap->rxconfig & ANEG_CFG_PS1)
2054                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2055                 if (ap->rxconfig & ANEG_CFG_PS2)
2056                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2057                 if (ap->rxconfig & ANEG_CFG_RF1)
2058                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2059                 if (ap->rxconfig & ANEG_CFG_RF2)
2060                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2061                 if (ap->rxconfig & ANEG_CFG_NP)
2062                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2063
2064                 ap->link_time = ap->cur_time;
2065
2066                 ap->flags ^= (MR_TOGGLE_TX);
2067                 if (ap->rxconfig & 0x0008)
2068                         ap->flags |= MR_TOGGLE_RX;
2069                 if (ap->rxconfig & ANEG_CFG_NP)
2070                         ap->flags |= MR_NP_RX;
2071                 ap->flags |= MR_PAGE_RX;
2072
2073                 ap->state = ANEG_STATE_COMPLETE_ACK;
2074                 ret = ANEG_TIMER_ENAB;
2075                 break;
2076
2077         case ANEG_STATE_COMPLETE_ACK:
2078                 if (ap->ability_match != 0 &&
2079                     ap->rxconfig == 0) {
2080                         ap->state = ANEG_STATE_AN_ENABLE;
2081                         break;
2082                 }
2083                 delta = ap->cur_time - ap->link_time;
2084                 if (delta > ANEG_STATE_SETTLE_TIME) {
2085                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2086                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2087                         } else {
2088                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2089                                     !(ap->flags & MR_NP_RX)) {
2090                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2091                                 } else {
2092                                         ret = ANEG_FAILED;
2093                                 }
2094                         }
2095                 }
2096                 break;
2097
2098         case ANEG_STATE_IDLE_DETECT_INIT:
2099                 ap->link_time = ap->cur_time;
2100                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2101                 tw32_f(MAC_MODE, tp->mac_mode);
2102                 udelay(40);
2103
2104                 ap->state = ANEG_STATE_IDLE_DETECT;
2105                 ret = ANEG_TIMER_ENAB;
2106                 break;
2107
2108         case ANEG_STATE_IDLE_DETECT:
2109                 if (ap->ability_match != 0 &&
2110                     ap->rxconfig == 0) {
2111                         ap->state = ANEG_STATE_AN_ENABLE;
2112                         break;
2113                 }
2114                 delta = ap->cur_time - ap->link_time;
2115                 if (delta > ANEG_STATE_SETTLE_TIME) {
2116                         /* XXX another gem from the Broadcom driver :( */
2117                         ap->state = ANEG_STATE_LINK_OK;
2118                 }
2119                 break;
2120
2121         case ANEG_STATE_LINK_OK:
2122                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2123                 ret = ANEG_DONE;
2124                 break;
2125
2126         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2127                 /* ??? unimplemented */
2128                 break;
2129
2130         case ANEG_STATE_NEXT_PAGE_WAIT:
2131                 /* ??? unimplemented */
2132                 break;
2133
2134         default:
2135                 ret = ANEG_FAILED;
2136                 break;
2137         }
2138
2139         return ret;
2140 }
2141
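/* Run the software fiber autonegotiation state machine to completion
 * (bounded at roughly 195 ms).  Returns nonzero on success and reports
 * the negotiated ability flags through *flags.
 */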
2142 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2143 {
2144         int res = 0;
2145         struct tg3_fiber_aneginfo aninfo;
2146         int status = ANEG_FAILED;
2147         unsigned int tick;
2148         u32 tmp;
2149
2150         tw32_f(MAC_TX_AUTO_NEG, 0);
2151
2152         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2153         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2154         udelay(40);
2155
2156         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2157         udelay(40);
2158
2159         memset(&aninfo, 0, sizeof(aninfo));
2160         aninfo.flags |= MR_AN_ENABLE;
2161         aninfo.state = ANEG_STATE_UNKNOWN;
2162         aninfo.cur_time = 0;
2163         tick = 0;
2164         while (++tick < 195000) {
2165                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2166                 if (status == ANEG_DONE || status == ANEG_FAILED)
2167                         break;
2168
2169                 udelay(1);
2170         }
2171
2172         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2173         tw32_f(MAC_MODE, tp->mac_mode);
2174         udelay(40);
2175
2176         *flags = aninfo.flags;
2177
2178         if (status == ANEG_DONE &&
2179             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2180                              MR_LP_ADV_FULL_DUPLEX)))
2181                 res = 1;
2182
2183         return res;
2184 }
2185
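/* Hardware initialization sequence for the BCM8002 SerDes PHY:
 * PLL lock range, soft reset, POR toggle, then channel deselect.
 */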
2186 static void tg3_init_bcm8002(struct tg3 *tp)
2187 {
2188         u32 mac_status = tr32(MAC_STATUS);
2189         int i;
2190
2191         /* Reset when initializing for the first time or when we have a link. */
2192         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2193             !(mac_status & MAC_STATUS_PCS_SYNCED))
2194                 return;
2195
2196         /* Set PLL lock range. */
2197         tg3_writephy(tp, 0x16, 0x8007);
2198
2199         /* SW reset */
2200         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2201
2202         /* Wait for reset to complete. */
2203         /* XXX schedule_timeout() ... */
2204         for (i = 0; i < 500; i++)
2205                 udelay(10);
2206
2207         /* Config mode; select PMA/Ch 1 regs. */
2208         tg3_writephy(tp, 0x10, 0x8411);
2209
2210         /* Enable auto-lock and comdet, select txclk for tx. */
2211         tg3_writephy(tp, 0x11, 0x0a10);
2212
2213         tg3_writephy(tp, 0x18, 0x00a0);
2214         tg3_writephy(tp, 0x16, 0x41ff);
2215
2216         /* Assert and deassert POR. */
2217         tg3_writephy(tp, 0x13, 0x0400);
2218         udelay(40);
2219         tg3_writephy(tp, 0x13, 0x0000);
2220
2221         tg3_writephy(tp, 0x11, 0x0a50);
2222         udelay(40);
2223         tg3_writephy(tp, 0x11, 0x0a10);
2224
2225         /* Wait for signal to stabilize */
2226         /* XXX schedule_timeout() ... */
2227         for (i = 0; i < 15000; i++)
2228                 udelay(10);
2229
2230         /* Deselect the channel register so we can read the PHYID
2231          * later.
2232          */
2233         tg3_writephy(tp, 0x10, 0x8011);
2234 }
2235
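/* Fiber link setup using the on-chip SG_DIG hardware autonegotiation
 * block.  Returns nonzero if the link came up.
 */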
2236 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2237 {
2238         u32 sg_dig_ctrl, sg_dig_status;
2239         u32 serdes_cfg, expected_sg_dig_ctrl;
2240         int workaround, port_a;
2241         int current_link_up;
2242
2243         serdes_cfg = 0;
2244         expected_sg_dig_ctrl = 0;
2245         workaround = 0;
2246         port_a = 1;
2247         current_link_up = 0;
2248
2249         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2250             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2251                 workaround = 1;
2252                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2253                         port_a = 0;
2254
2255                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2256                 /* preserve bits 20-23 for voltage regulator */
2257                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2258         }
2259
2260         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2261
2262         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2263                 if (sg_dig_ctrl & (1 << 31)) {
2264                         if (workaround) {
2265                                 u32 val = serdes_cfg;
2266
2267                                 if (port_a)
2268                                         val |= 0xc010000;
2269                                 else
2270                                         val |= 0x4010000;
2271                                 tw32_f(MAC_SERDES_CFG, val);
2272                         }
2273                         tw32_f(SG_DIG_CTRL, 0x01388400);
2274                 }
2275                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2276                         tg3_setup_flow_control(tp, 0, 0);
2277                         current_link_up = 1;
2278                 }
2279                 goto out;
2280         }
2281
2282         /* Want auto-negotiation.  */
2283         expected_sg_dig_ctrl = 0x81388400;
2284
2285         /* Pause capability */
2286         expected_sg_dig_ctrl |= (1 << 11);
2287
2288         /* Asymmetric pause */
2289         expected_sg_dig_ctrl |= (1 << 12);
2290
2291         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2292                 if (workaround)
2293                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2294                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2295                 udelay(5);
2296                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2297
2298                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2299         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2300                                  MAC_STATUS_SIGNAL_DET)) {
2301                 int i;
2302
2303                 /* Give time to negotiate (~200ms) */
2304                 for (i = 0; i < 40000; i++) {
2305                         sg_dig_status = tr32(SG_DIG_STATUS);
2306                         if (sg_dig_status & (0x3))
2307                                 break;
2308                         udelay(5);
2309                 }
2310                 mac_status = tr32(MAC_STATUS);
2311
2312                 if ((sg_dig_status & (1 << 1)) &&
2313                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2314                         u32 local_adv, remote_adv;
2315
2316                         local_adv = ADVERTISE_PAUSE_CAP;
2317                         remote_adv = 0;
2318                         if (sg_dig_status & (1 << 19))
2319                                 remote_adv |= LPA_PAUSE_CAP;
2320                         if (sg_dig_status & (1 << 20))
2321                                 remote_adv |= LPA_PAUSE_ASYM;
2322
2323                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2324                         current_link_up = 1;
2325                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2326                 } else if (!(sg_dig_status & (1 << 1))) {
2327                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2328                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2329                         else {
2330                                 if (workaround) {
2331                                         u32 val = serdes_cfg;
2332
2333                                         if (port_a)
2334                                                 val |= 0xc010000;
2335                                         else
2336                                                 val |= 0x4010000;
2337
2338                                         tw32_f(MAC_SERDES_CFG, val);
2339                                 }
2340
2341                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2342                                 udelay(40);
2343
2344                                 /* Link parallel detection: the link is up
2345                                  * only if we have PCS_SYNC and are not
2346                                  * receiving config code words.  */
2347                                 mac_status = tr32(MAC_STATUS);
2348                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2349                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2350                                         tg3_setup_flow_control(tp, 0, 0);
2351                                         current_link_up = 1;
2352                                 }
2353                         }
2354                 }
2355         }
2356
2357 out:
2358         return current_link_up;
2359 }
2360
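/* Fiber link setup without hardware autonegotiation: either run the
 * software autoneg state machine or force a 1000FD link.  Returns
 * nonzero if the link came up.
 */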
2361 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2362 {
2363         int current_link_up = 0;
2364
2365         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2366                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2367                 goto out;
2368         }
2369
2370         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2371                 u32 flags;
2372                 int i;
2373
2374                 if (fiber_autoneg(tp, &flags)) {
2375                         u32 local_adv, remote_adv;
2376
2377                         local_adv = ADVERTISE_PAUSE_CAP;
2378                         remote_adv = 0;
2379                         if (flags & MR_LP_ADV_SYM_PAUSE)
2380                                 remote_adv |= LPA_PAUSE_CAP;
2381                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2382                                 remote_adv |= LPA_PAUSE_ASYM;
2383
2384                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2385
2386                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2387                         current_link_up = 1;
2388                 }
2389                 for (i = 0; i < 30; i++) {
2390                         udelay(20);
2391                         tw32_f(MAC_STATUS,
2392                                (MAC_STATUS_SYNC_CHANGED |
2393                                 MAC_STATUS_CFG_CHANGED));
2394                         udelay(40);
2395                         if ((tr32(MAC_STATUS) &
2396                              (MAC_STATUS_SYNC_CHANGED |
2397                               MAC_STATUS_CFG_CHANGED)) == 0)
2398                                 break;
2399                 }
2400
2401                 mac_status = tr32(MAC_STATUS);
2402                 if (current_link_up == 0 &&
2403                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2404                     !(mac_status & MAC_STATUS_RCVD_CFG))
2405                         current_link_up = 1;
2406         } else {
2407                 /* Forcing 1000FD link up. */
2408                 current_link_up = 1;
2409                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2410
2411                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2412                 udelay(40);
2413         }
2414
2415 out:
2416         return current_link_up;
2417 }
2418
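/* Top-level link setup for TBI (fiber) ports: selects hardware or
 * software autonegotiation, updates MAC_MODE and the link LEDs, and
 * reports any link state change.
 */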
2419 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2420 {
2421         u32 orig_pause_cfg;
2422         u16 orig_active_speed;
2423         u8 orig_active_duplex;
2424         u32 mac_status;
2425         int current_link_up;
2426         int i;
2427
2428         orig_pause_cfg =
2429                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2430                                   TG3_FLAG_TX_PAUSE));
2431         orig_active_speed = tp->link_config.active_speed;
2432         orig_active_duplex = tp->link_config.active_duplex;
2433
2434         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2435             netif_carrier_ok(tp->dev) &&
2436             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2437                 mac_status = tr32(MAC_STATUS);
2438                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2439                                MAC_STATUS_SIGNAL_DET |
2440                                MAC_STATUS_CFG_CHANGED |
2441                                MAC_STATUS_RCVD_CFG);
2442                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2443                                    MAC_STATUS_SIGNAL_DET)) {
2444                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2445                                             MAC_STATUS_CFG_CHANGED));
2446                         return 0;
2447                 }
2448         }
2449
2450         tw32_f(MAC_TX_AUTO_NEG, 0);
2451
2452         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2453         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2454         tw32_f(MAC_MODE, tp->mac_mode);
2455         udelay(40);
2456
2457         if (tp->phy_id == PHY_ID_BCM8002)
2458                 tg3_init_bcm8002(tp);
2459
2460         /* Enable link change event even when serdes polling.  */
2461         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2462         udelay(40);
2463
2464         current_link_up = 0;
2465         mac_status = tr32(MAC_STATUS);
2466
2467         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2468                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2469         else
2470                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2471
2472         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2473         tw32_f(MAC_MODE, tp->mac_mode);
2474         udelay(40);
2475
2476         tp->hw_status->status =
2477                 (SD_STATUS_UPDATED |
2478                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2479
2480         for (i = 0; i < 100; i++) {
2481                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2482                                     MAC_STATUS_CFG_CHANGED));
2483                 udelay(5);
2484                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2485                                          MAC_STATUS_CFG_CHANGED)) == 0)
2486                         break;
2487         }
2488
2489         mac_status = tr32(MAC_STATUS);
2490         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2491                 current_link_up = 0;
2492                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2493                         tw32_f(MAC_MODE, (tp->mac_mode |
2494                                           MAC_MODE_SEND_CONFIGS));
2495                         udelay(1);
2496                         tw32_f(MAC_MODE, tp->mac_mode);
2497                 }
2498         }
2499
2500         if (current_link_up == 1) {
2501                 tp->link_config.active_speed = SPEED_1000;
2502                 tp->link_config.active_duplex = DUPLEX_FULL;
2503                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2504                                     LED_CTRL_LNKLED_OVERRIDE |
2505                                     LED_CTRL_1000MBPS_ON));
2506         } else {
2507                 tp->link_config.active_speed = SPEED_INVALID;
2508                 tp->link_config.active_duplex = DUPLEX_INVALID;
2509                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2510                                     LED_CTRL_LNKLED_OVERRIDE |
2511                                     LED_CTRL_TRAFFIC_OVERRIDE));
2512         }
2513
2514         if (current_link_up != netif_carrier_ok(tp->dev)) {
2515                 if (current_link_up)
2516                         netif_carrier_on(tp->dev);
2517                 else
2518                         netif_carrier_off(tp->dev);
2519                 tg3_link_report(tp);
2520         } else {
2521                 u32 now_pause_cfg =
2522                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2523                                          TG3_FLAG_TX_PAUSE);
2524                 if (orig_pause_cfg != now_pause_cfg ||
2525                     orig_active_speed != tp->link_config.active_speed ||
2526                     orig_active_duplex != tp->link_config.active_duplex)
2527                         tg3_link_report(tp);
2528         }
2529
2530         return 0;
2531 }
2532
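/* Link setup for SerDes ports programmed through MII registers,
 * advertising via the 1000BASE-X (ADVERTISE_1000X*) bits.
 */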
2533 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2534 {
2535         int current_link_up, err = 0;
2536         u32 bmsr, bmcr;
2537         u16 current_speed;
2538         u8 current_duplex;
2539
2540         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2541         tw32_f(MAC_MODE, tp->mac_mode);
2542         udelay(40);
2543
2544         tw32(MAC_EVENT, 0);
2545
2546         tw32_f(MAC_STATUS,
2547              (MAC_STATUS_SYNC_CHANGED |
2548               MAC_STATUS_CFG_CHANGED |
2549               MAC_STATUS_MI_COMPLETION |
2550               MAC_STATUS_LNKSTATE_CHANGED));
2551         udelay(40);
2552
2553         if (force_reset)
2554                 tg3_phy_reset(tp);
2555
2556         current_link_up = 0;
2557         current_speed = SPEED_INVALID;
2558         current_duplex = DUPLEX_INVALID;
2559
2560         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2561         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2562
2563         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2564
2565         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2566             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2567                 /* do nothing, just check for link up at the end */
2568         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2569                 u32 adv, new_adv;
2570
2571                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2572                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2573                                   ADVERTISE_1000XPAUSE |
2574                                   ADVERTISE_1000XPSE_ASYM |
2575                                   ADVERTISE_SLCT);
2576
2577                 /* Always advertise symmetric PAUSE just like copper */
2578                 new_adv |= ADVERTISE_1000XPAUSE;
2579
2580                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2581                         new_adv |= ADVERTISE_1000XHALF;
2582                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2583                         new_adv |= ADVERTISE_1000XFULL;
2584
2585                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2586                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2587                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2588                         tg3_writephy(tp, MII_BMCR, bmcr);
2589
2590                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2591                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2592                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2593
2594                         return err;
2595                 }
2596         } else {
2597                 u32 new_bmcr;
2598
2599                 bmcr &= ~BMCR_SPEED1000;
2600                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2601
2602                 if (tp->link_config.duplex == DUPLEX_FULL)
2603                         new_bmcr |= BMCR_FULLDPLX;
2604
2605                 if (new_bmcr != bmcr) {
2606                         /* BMCR_SPEED1000 is a reserved bit that needs
2607                          * to be set on write.
2608                          */
2609                         new_bmcr |= BMCR_SPEED1000;
2610
2611                         /* Force a linkdown */
2612                         if (netif_carrier_ok(tp->dev)) {
2613                                 u32 adv;
2614
2615                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2616                                 adv &= ~(ADVERTISE_1000XFULL |
2617                                          ADVERTISE_1000XHALF |
2618                                          ADVERTISE_SLCT);
2619                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2620                                 tg3_writephy(tp, MII_BMCR, bmcr |
2621                                                            BMCR_ANRESTART |
2622                                                            BMCR_ANENABLE);
2623                                 udelay(10);
2624                                 netif_carrier_off(tp->dev);
2625                         }
2626                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2627                         bmcr = new_bmcr;
2628                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2629                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2630                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2631                 }
2632         }
2633
2634         if (bmsr & BMSR_LSTATUS) {
2635                 current_speed = SPEED_1000;
2636                 current_link_up = 1;
2637                 if (bmcr & BMCR_FULLDPLX)
2638                         current_duplex = DUPLEX_FULL;
2639                 else
2640                         current_duplex = DUPLEX_HALF;
2641
2642                 if (bmcr & BMCR_ANENABLE) {
2643                         u32 local_adv, remote_adv, common;
2644
2645                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2646                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2647                         common = local_adv & remote_adv;
2648                         if (common & (ADVERTISE_1000XHALF |
2649                                       ADVERTISE_1000XFULL)) {
2650                                 if (common & ADVERTISE_1000XFULL)
2651                                         current_duplex = DUPLEX_FULL;
2652                                 else
2653                                         current_duplex = DUPLEX_HALF;
2654
2655                                 tg3_setup_flow_control(tp, local_adv,
2656                                                        remote_adv);
2657                         }
2658                         else
2659                                 current_link_up = 0;
2660                 }
2661         }
2662
2663         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2664         if (tp->link_config.active_duplex == DUPLEX_HALF)
2665                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2666
2667         tw32_f(MAC_MODE, tp->mac_mode);
2668         udelay(40);
2669
2670         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2671
2672         tp->link_config.active_speed = current_speed;
2673         tp->link_config.active_duplex = current_duplex;
2674
2675         if (current_link_up != netif_carrier_ok(tp->dev)) {
2676                 if (current_link_up)
2677                         netif_carrier_on(tp->dev);
2678                 else {
2679                         netif_carrier_off(tp->dev);
2680                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2681                 }
2682                 tg3_link_report(tp);
2683         }
2684         return err;
2685 }
2686
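/* Parallel detection for MII SerDes ports: if autoneg is enabled but
 * the link is down and we see signal without config code words, force
 * a 1000FD link; if config code words later reappear on such a link,
 * re-enable autonegotiation.
 */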
2687 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2688 {
2689         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2690                 /* Give autoneg time to complete. */
2691                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2692                 return;
2693         }
2694         if (!netif_carrier_ok(tp->dev) &&
2695             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2696                 u32 bmcr;
2697
2698                 tg3_readphy(tp, MII_BMCR, &bmcr);
2699                 if (bmcr & BMCR_ANENABLE) {
2700                         u32 phy1, phy2;
2701
2702                         /* Select shadow register 0x1f */
2703                         tg3_writephy(tp, 0x1c, 0x7c00);
2704                         tg3_readphy(tp, 0x1c, &phy1);
2705
2706                         /* Select expansion interrupt status register */
2707                         tg3_writephy(tp, 0x17, 0x0f01);
2708                         tg3_readphy(tp, 0x15, &phy2);
2709                         tg3_readphy(tp, 0x15, &phy2);
2710
2711                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2712                                 /* We have signal detect and not receiving
2713                                  * config code words, link is up by parallel
2714                                  * detection.
2715                                  */
2716
2717                                 bmcr &= ~BMCR_ANENABLE;
2718                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2719                                 tg3_writephy(tp, MII_BMCR, bmcr);
2720                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2721                         }
2722                 }
2723         }
2724         else if (netif_carrier_ok(tp->dev) &&
2725                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2726                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2727                 u32 phy2;
2728
2729                 /* Select expansion interrupt status register */
2730                 tg3_writephy(tp, 0x17, 0x0f01);
2731                 tg3_readphy(tp, 0x15, &phy2);
2732                 if (phy2 & 0x20) {
2733                         u32 bmcr;
2734
2735                         /* Config code words received, turn on autoneg. */
2736                         tg3_readphy(tp, MII_BMCR, &bmcr);
2737                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2738
2739                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2740
2741                 }
2742         }
2743 }
2744
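/* Dispatch link setup to the copper, TBI fiber or MII SerDes handler,
 * then adjust the transmit slot time and statistics coalescing to
 * match the resulting link state.
 */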
2745 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2746 {
2747         int err;
2748
2749         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2750                 err = tg3_setup_fiber_phy(tp, force_reset);
2751         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2752                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2753         } else {
2754                 err = tg3_setup_copper_phy(tp, force_reset);
2755         }
2756
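        /* 1000 Mb/s half duplex uses carrier extension and therefore a much
         * larger slot time, hence the 0xff slot-time value below versus the
         * 32 used for every other speed/duplex combination.
         */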
2757         if (tp->link_config.active_speed == SPEED_1000 &&
2758             tp->link_config.active_duplex == DUPLEX_HALF)
2759                 tw32(MAC_TX_LENGTHS,
2760                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2761                       (6 << TX_LENGTHS_IPG_SHIFT) |
2762                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2763         else
2764                 tw32(MAC_TX_LENGTHS,
2765                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2766                       (6 << TX_LENGTHS_IPG_SHIFT) |
2767                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2768
2769         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2770                 if (netif_carrier_ok(tp->dev)) {
2771                         tw32(HOSTCC_STAT_COAL_TICKS,
2772                              tp->coal.stats_block_coalesce_usecs);
2773                 } else {
2774                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2775                 }
2776         }
2777
2778         return err;
2779 }
2780
2781 /* Tigon3 never reports partial packet sends.  So we do not
2782  * need special logic to handle SKBs that have not had all
2783  * of their frags sent yet, like SunGEM does.
2784  */
2785 static void tg3_tx(struct tg3 *tp)
2786 {
2787         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2788         u32 sw_idx = tp->tx_cons;
2789
2790         while (sw_idx != hw_idx) {
2791                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2792                 struct sk_buff *skb = ri->skb;
2793                 int i;
2794
2795                 if (unlikely(skb == NULL))
2796                         BUG();
2797
2798                 pci_unmap_single(tp->pdev,
2799                                  pci_unmap_addr(ri, mapping),
2800                                  skb_headlen(skb),
2801                                  PCI_DMA_TODEVICE);
2802
2803                 ri->skb = NULL;
2804
2805                 sw_idx = NEXT_TX(sw_idx);
2806
2807                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2808                         if (unlikely(sw_idx == hw_idx))
2809                                 BUG();
2810
2811                         ri = &tp->tx_buffers[sw_idx];
2812                         if (unlikely(ri->skb != NULL))
2813                                 BUG();
2814
2815                         pci_unmap_page(tp->pdev,
2816                                        pci_unmap_addr(ri, mapping),
2817                                        skb_shinfo(skb)->frags[i].size,
2818                                        PCI_DMA_TODEVICE);
2819
2820                         sw_idx = NEXT_TX(sw_idx);
2821                 }
2822
2823                 dev_kfree_skb(skb);
2824         }
2825
2826         tp->tx_cons = sw_idx;
2827
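        /* Only wake the queue once a comfortable number of descriptors
         * (TG3_TX_WAKEUP_THRESH) has been reclaimed, so that the queue is
         * not toggled on and off for every completed packet.
         */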
2828         if (netif_queue_stopped(tp->dev) &&
2829             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2830                 netif_wake_queue(tp->dev);
2831 }
2832
2833 /* Returns size of skb allocated or < 0 on error.
2834  *
2835  * We only need to fill in the address because the other members
2836  * of the RX descriptor are invariant, see tg3_init_rings.
2837  *
2838  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2839  * posting buffers we only dirty the first cache line of the RX
2840  * descriptor (containing the address).  Whereas for the RX status
2841  * buffers the cpu only reads the last cacheline of the RX descriptor
2842  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2843  */
2844 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2845                             int src_idx, u32 dest_idx_unmasked)
2846 {
2847         struct tg3_rx_buffer_desc *desc;
2848         struct ring_info *map, *src_map;
2849         struct sk_buff *skb;
2850         dma_addr_t mapping;
2851         int skb_size, dest_idx;
2852
2853         src_map = NULL;
2854         switch (opaque_key) {
2855         case RXD_OPAQUE_RING_STD:
2856                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2857                 desc = &tp->rx_std[dest_idx];
2858                 map = &tp->rx_std_buffers[dest_idx];
2859                 if (src_idx >= 0)
2860                         src_map = &tp->rx_std_buffers[src_idx];
2861                 skb_size = tp->rx_pkt_buf_sz;
2862                 break;
2863
2864         case RXD_OPAQUE_RING_JUMBO:
2865                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2866                 desc = &tp->rx_jumbo[dest_idx];
2867                 map = &tp->rx_jumbo_buffers[dest_idx];
2868                 if (src_idx >= 0)
2869                         src_map = &tp->rx_jumbo_buffers[src_idx];
2870                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2871                 break;
2872
2873         default:
2874                 return -EINVAL;
2875         };
2876
2877         /* Do not overwrite any of the map or rp information
2878          * until we are sure we can commit to a new buffer.
2879          *
2880          * Callers depend upon this behavior and assume that
2881          * we leave everything unchanged if we fail.
2882          */
2883         skb = dev_alloc_skb(skb_size);
2884         if (skb == NULL)
2885                 return -ENOMEM;
2886
2887         skb->dev = tp->dev;
2888         skb_reserve(skb, tp->rx_offset);
2889
2890         mapping = pci_map_single(tp->pdev, skb->data,
2891                                  skb_size - tp->rx_offset,
2892                                  PCI_DMA_FROMDEVICE);
2893
2894         map->skb = skb;
2895         pci_unmap_addr_set(map, mapping, mapping);
2896
2897         if (src_map != NULL)
2898                 src_map->skb = NULL;
2899
2900         desc->addr_hi = ((u64)mapping >> 32);
2901         desc->addr_lo = ((u64)mapping & 0xffffffff);
2902
2903         return skb_size;
2904 }
2905
2906 /* We only need to copy over the address because the other
2907  * members of the RX descriptor are invariant.  See notes above
2908  * tg3_alloc_rx_skb for full details.
2909  */
2910 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2911                            int src_idx, u32 dest_idx_unmasked)
2912 {
2913         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2914         struct ring_info *src_map, *dest_map;
2915         int dest_idx;
2916
2917         switch (opaque_key) {
2918         case RXD_OPAQUE_RING_STD:
2919                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2920                 dest_desc = &tp->rx_std[dest_idx];
2921                 dest_map = &tp->rx_std_buffers[dest_idx];
2922                 src_desc = &tp->rx_std[src_idx];
2923                 src_map = &tp->rx_std_buffers[src_idx];
2924                 break;
2925
2926         case RXD_OPAQUE_RING_JUMBO:
2927                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2928                 dest_desc = &tp->rx_jumbo[dest_idx];
2929                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2930                 src_desc = &tp->rx_jumbo[src_idx];
2931                 src_map = &tp->rx_jumbo_buffers[src_idx];
2932                 break;
2933
2934         default:
2935                 return;
2936         };
2937
2938         dest_map->skb = src_map->skb;
2939         pci_unmap_addr_set(dest_map, mapping,
2940                            pci_unmap_addr(src_map, mapping));
2941         dest_desc->addr_hi = src_desc->addr_hi;
2942         dest_desc->addr_lo = src_desc->addr_lo;
2943
2944         src_map->skb = NULL;
2945 }
2946
2947 #if TG3_VLAN_TAG_USED
2948 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2949 {
2950         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2951 }
2952 #endif
2953
2954 /* The RX ring scheme is composed of multiple rings which post fresh
2955  * buffers to the chip, and one special ring the chip uses to report
2956  * status back to the host.
2957  *
2958  * The special ring reports the status of received packets to the
2959  * host.  The chip does not write into the original descriptor the
2960  * RX buffer was obtained from.  The chip simply takes the original
2961  * descriptor as provided by the host, updates the status and length
2962  * field, then writes this into the next status ring entry.
2963  *
2964  * Each ring the host uses to post buffers to the chip is described
2965  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2966  * it is first placed into the on-chip ram.  When the packet's length
2967  * is known, it walks down the TG3_BDINFO entries to select the ring.
2968  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2969  * which is within the range of the new packet's length is chosen.
2970  *
2971  * The "separate ring for rx status" scheme may sound queer, but it makes
2972  * sense from a cache coherency perspective.  If only the host writes
2973  * to the buffer post rings, and only the chip writes to the rx status
2974  * rings, then cache lines never move beyond shared-modified state.
2975  * If both the host and chip were to write into the same ring, cache line
2976  * eviction could occur since both entities want it in an exclusive state.
2977  */
2978 static int tg3_rx(struct tg3 *tp, int budget)
2979 {
2980         u32 work_mask;
2981         u32 sw_idx = tp->rx_rcb_ptr;
2982         u16 hw_idx;
2983         int received;
2984
2985         hw_idx = tp->hw_status->idx[0].rx_producer;
2986         /*
2987          * We need to order the read of hw_idx and the read of
2988          * the opaque cookie.
2989          */
2990         rmb();
2991         work_mask = 0;
2992         received = 0;
2993         while (sw_idx != hw_idx && budget > 0) {
2994                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2995                 unsigned int len;
2996                 struct sk_buff *skb;
2997                 dma_addr_t dma_addr;
2998                 u32 opaque_key, desc_idx, *post_ptr;
2999
3000                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3001                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3002                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3003                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3004                                                   mapping);
3005                         skb = tp->rx_std_buffers[desc_idx].skb;
3006                         post_ptr = &tp->rx_std_ptr;
3007                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3008                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3009                                                   mapping);
3010                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3011                         post_ptr = &tp->rx_jumbo_ptr;
3012                 }
3013                 else {
3014                         goto next_pkt_nopost;
3015                 }
3016
3017                 work_mask |= opaque_key;
3018
3019                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3020                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3021                 drop_it:
3022                         tg3_recycle_rx(tp, opaque_key,
3023                                        desc_idx, *post_ptr);
3024                 drop_it_no_recycle:
3025                         /* Other statistics kept track of by card. */
3026                         tp->net_stats.rx_dropped++;
3027                         goto next_pkt;
3028                 }
3029
3030                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3031
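                /* Large packets keep the original skb and post a freshly
                 * allocated buffer in its place; small packets are copied
                 * into a new skb and the original buffer is recycled back
                 * onto the posting ring, which is cheaper than remapping a
                 * full-sized buffer for every small frame.
                 */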
3032                 /* rx_offset != 2 iff this is a 5701 card running
3033                  * in PCI-X mode [see tg3_get_invariants()]
3034                  */
3035                 if (len > RX_COPY_THRESHOLD &&
3036                     tp->rx_offset == 2) {
3037                         int skb_size;
3038
3039                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3040                                                     desc_idx, *post_ptr);
3041                         if (skb_size < 0)
3042                                 goto drop_it;
3043
3044                         pci_unmap_single(tp->pdev, dma_addr,
3045                                          skb_size - tp->rx_offset,
3046                                          PCI_DMA_FROMDEVICE);
3047
3048                         skb_put(skb, len);
3049                 } else {
3050                         struct sk_buff *copy_skb;
3051
3052                         tg3_recycle_rx(tp, opaque_key,
3053                                        desc_idx, *post_ptr);
3054
3055                         copy_skb = dev_alloc_skb(len + 2);
3056                         if (copy_skb == NULL)
3057                                 goto drop_it_no_recycle;
3058
3059                         copy_skb->dev = tp->dev;
3060                         skb_reserve(copy_skb, 2);
3061                         skb_put(copy_skb, len);
3062                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3063                         memcpy(copy_skb->data, skb->data, len);
3064                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3065
3066                         /* We'll reuse the original ring buffer. */
3067                         skb = copy_skb;
3068                 }
3069
3070                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3071                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3072                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3073                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3074                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3075                 else
3076                         skb->ip_summed = CHECKSUM_NONE;
3077
3078                 skb->protocol = eth_type_trans(skb, tp->dev);
3079 #if TG3_VLAN_TAG_USED
3080                 if (tp->vlgrp != NULL &&
3081                     desc->type_flags & RXD_FLAG_VLAN) {
3082                         tg3_vlan_rx(tp, skb,
3083                                     desc->err_vlan & RXD_VLAN_MASK);
3084                 } else
3085 #endif
3086                         netif_receive_skb(skb);
3087
3088                 tp->dev->last_rx = jiffies;
3089                 received++;
3090                 budget--;
3091
3092 next_pkt:
3093                 (*post_ptr)++;
3094 next_pkt_nopost:
3095                 sw_idx++;
3096                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3097
3098                 /* Refresh hw_idx to see if there is new work */
3099                 if (sw_idx == hw_idx) {
3100                         hw_idx = tp->hw_status->idx[0].rx_producer;
3101                         rmb();
3102                 }
3103         }
3104
3105         /* ACK the status ring. */
3106         tp->rx_rcb_ptr = sw_idx;
3107         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3108
3109         /* Refill RX ring(s). */
3110         if (work_mask & RXD_OPAQUE_RING_STD) {
3111                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3112                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3113                              sw_idx);
3114         }
3115         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3116                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3117                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3118                              sw_idx);
3119         }
3120         mmiowb();
3121
3122         return received;
3123 }
3124
3125 static int tg3_poll(struct net_device *netdev, int *budget)
3126 {
3127         struct tg3 *tp = netdev_priv(netdev);
3128         struct tg3_hw_status *sblk = tp->hw_status;
3129         int done;
3130
3131         /* handle link change and other phy events */
3132         if (!(tp->tg3_flags &
3133               (TG3_FLAG_USE_LINKCHG_REG |
3134                TG3_FLAG_POLL_SERDES))) {
3135                 if (sblk->status & SD_STATUS_LINK_CHG) {
3136                         sblk->status = SD_STATUS_UPDATED |
3137                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3138                         spin_lock(&tp->lock);
3139                         tg3_setup_phy(tp, 0);
3140                         spin_unlock(&tp->lock);
3141                 }
3142         }
3143
3144         /* run TX completion thread */
3145         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3146                 spin_lock(&tp->tx_lock);
3147                 tg3_tx(tp);
3148                 spin_unlock(&tp->tx_lock);
3149         }
3150
3151         /* run RX thread, within the bounds set by NAPI.
3152          * All RX "locking" is done by ensuring outside
3153          * code synchronizes with dev->poll()
3154          */
3155         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3156                 int orig_budget = *budget;
3157                 int work_done;
3158
3159                 if (orig_budget > netdev->quota)
3160                         orig_budget = netdev->quota;
3161
3162                 work_done = tg3_rx(tp, orig_budget);
3163
3164                 *budget -= work_done;
3165                 netdev->quota -= work_done;
3166         }
3167
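        /* With tagged status the chip tags each status block update;
         * remember the latest tag so the interrupt code can write it back
         * (last_tag << 24) to the interrupt mailbox, telling the chip how
         * far we have processed.
         */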
3168         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3169                 tp->last_tag = sblk->status_tag;
3170         rmb();
3171         sblk->status &= ~SD_STATUS_UPDATED;
3172
3173         /* if no more work, tell net stack and NIC we're done */
3174         done = !tg3_has_work(tp);
3175         if (done) {
3176                 spin_lock(&tp->lock);
3177                 netif_rx_complete(netdev);
3178                 tg3_restart_ints(tp);
3179                 spin_unlock(&tp->lock);
3180         }
3181
3182         return (done ? 0 : 1);
3183 }
3184
3185 static void tg3_irq_quiesce(struct tg3 *tp)
3186 {
3187         BUG_ON(tp->irq_sync);
3188
3189         tp->irq_sync = 1;
3190         smp_mb();
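        /* Make the irq_sync store visible to the interrupt handlers (which
         * check it via tg3_irq_sync()) before waiting for any handler that
         * is already running to complete.
         */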
3191
3192         synchronize_irq(tp->pdev->irq);
3193 }
3194
3195 static inline int tg3_irq_sync(struct tg3 *tp)
3196 {
3197         return tp->irq_sync;
3198 }
3199
3200 /* Fully shut down all tg3 driver activity elsewhere in the system.
3201  * If irq_sync is non-zero, the IRQ handler is synchronized with as
3202  * well; this is usually only necessary when shutting down the
3203  * device.
3204  */
3205 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3206 {
3207         if (irq_sync)
3208                 tg3_irq_quiesce(tp);
3209         spin_lock_bh(&tp->lock);
3210         spin_lock(&tp->tx_lock);
3211 }
3212
3213 static inline void tg3_full_unlock(struct tg3 *tp)
3214 {
3215         spin_unlock(&tp->tx_lock);
3216         spin_unlock_bh(&tp->lock);
3217 }
3218
3219 /* MSI ISR - No need to check for interrupt sharing and no need to
3220  * flush status block and interrupt mailbox. PCI ordering rules
3221  * guarantee that MSI will arrive after the status block.
3222  */
3223 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3224 {
3225         struct net_device *dev = dev_id;
3226         struct tg3 *tp = netdev_priv(dev);
3227         struct tg3_hw_status *sblk = tp->hw_status;
3228
3229         /*
3230          * Writing any value to intr-mbox-0 clears PCI INTA# and
3231          * chip-internal interrupt pending events.
3232          * Writing non-zero to intr-mbox-0 additionally tells the
3233          * NIC to stop sending us irqs, engaging "in-intr-handler"
3234          * event coalescing.
3235          */
3236         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3237         tp->last_tag = sblk->status_tag;
3238         rmb();
3239         if (tg3_irq_sync(tp))
3240                 goto out;
3241         sblk->status &= ~SD_STATUS_UPDATED;
3242         if (likely(tg3_has_work(tp)))
3243                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3244         else {
3245                 /* No work, re-enable interrupts.  */
3246                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3247                              tp->last_tag << 24);
3248         }
3249 out:
3250         return IRQ_RETVAL(1);
3251 }
3252
3253 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3254 {
3255         struct net_device *dev = dev_id;
3256         struct tg3 *tp = netdev_priv(dev);
3257         struct tg3_hw_status *sblk = tp->hw_status;
3258         unsigned int handled = 1;
3259
3260         /* In INTx mode, it is possible for the interrupt to arrive at
3261          * the CPU before the posted status block is visible in host memory.
3262          * Reading the PCI State register will confirm whether the
3263          * interrupt is ours and will flush the status block.
3264          */
3265         if ((sblk->status & SD_STATUS_UPDATED) ||
3266             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3267                 /*
3268                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3269                  * chip-internal interrupt pending events.
3270          * Writing non-zero to intr-mbox-0 additionally tells the
3271                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3272                  * event coalescing.
3273                  */
3274                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3275                              0x00000001);
3276                 if (tg3_irq_sync(tp))
3277                         goto out;
3278                 sblk->status &= ~SD_STATUS_UPDATED;
3279                 if (likely(tg3_has_work(tp)))
3280                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3281                 else {
3282                         /* No work, shared interrupt perhaps?  re-enable
3283                          * interrupts, and flush that PCI write
3284                          */
3285                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3286                                 0x00000000);
3287                 }
3288         } else {        /* shared interrupt */
3289                 handled = 0;
3290         }
3291 out:
3292         return IRQ_RETVAL(handled);
3293 }
3294
3295 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3296 {
3297         struct net_device *dev = dev_id;
3298         struct tg3 *tp = netdev_priv(dev);
3299         struct tg3_hw_status *sblk = tp->hw_status;
3300         unsigned int handled = 1;
3301
3302         /* In INTx mode, it is possible for the interrupt to arrive at
3303          * the CPU before the posted status block is visible in host memory.
3304          * Reading the PCI State register will confirm whether the
3305          * interrupt is ours and will flush the status block.
3306          */
3307         if ((sblk->status & SD_STATUS_UPDATED) ||
3308             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3309                 /*
3310                  * writing any value to intr-mbox-0 clears PCI INTA# and
3311                  * chip-internal interrupt pending events.
3312          * writing non-zero to intr-mbox-0 additionally tells the
3313                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3314                  * event coalescing.
3315                  */
3316                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3317                              0x00000001);
3318                 tp->last_tag = sblk->status_tag;
3319                 rmb();
3320                 if (tg3_irq_sync(tp))
3321                         goto out;
3322                 sblk->status &= ~SD_STATUS_UPDATED;
3323                 if (likely(tg3_has_work(tp)))
3324                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3325                 else {
3326                         /* no work, shared interrupt perhaps?  re-enable
3327                          * interrupts, and flush that PCI write
3328                          */
3329                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3330                                        tp->last_tag << 24);
3331                 }
3332         } else {        /* shared interrupt */
3333                 handled = 0;
3334         }
3335 out:
3336         return IRQ_RETVAL(handled);
3337 }
3338
3339 /* ISR for interrupt test */
3340 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3341                 struct pt_regs *regs)
3342 {
3343         struct net_device *dev = dev_id;
3344         struct tg3 *tp = netdev_priv(dev);
3345         struct tg3_hw_status *sblk = tp->hw_status;
3346
3347         if (sblk->status & SD_STATUS_UPDATED) {
3348                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3349                              0x00000001);
3350                 return IRQ_RETVAL(1);
3351         }
3352         return IRQ_RETVAL(0);
3353 }
3354
3355 static int tg3_init_hw(struct tg3 *);
3356 static int tg3_halt(struct tg3 *, int, int);
3357
3358 #ifdef CONFIG_NET_POLL_CONTROLLER
3359 static void tg3_poll_controller(struct net_device *dev)
3360 {
3361         struct tg3 *tp = netdev_priv(dev);
3362
3363         tg3_interrupt(tp->pdev->irq, dev, NULL);
3364 }
3365 #endif
3366
3367 static void tg3_reset_task(void *_data)
3368 {
3369         struct tg3 *tp = _data;
3370         unsigned int restart_timer;
3371
3372         tg3_netif_stop(tp);
3373
3374         tg3_full_lock(tp, 1);
3375
3376         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3377         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3378
3379         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3380         tg3_init_hw(tp);
3381
3382         tg3_netif_start(tp);
3383
3384         tg3_full_unlock(tp);
3385
3386         if (restart_timer)
3387                 mod_timer(&tp->timer, jiffies + 1);
3388 }
3389
3390 static void tg3_tx_timeout(struct net_device *dev)
3391 {
3392         struct tg3 *tp = netdev_priv(dev);
3393
3394         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3395                dev->name);
3396
3397         schedule_work(&tp->reset_task);
3398 }
3399
3400 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3401
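/* Work around a DMA hardware bug with buffers that span a 4GB boundary
 * (see tg3_4g_overflow_test() below): copy the whole skb into a new
 * linear skb, point the descriptor at the new mapping, then unmap and
 * clear the ring entries that the original skb occupied.
 */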
3402 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3403                                        u32 guilty_entry, int guilty_len,
3404                                        u32 last_plus_one, u32 *start, u32 mss)
3405 {
3406         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3407         dma_addr_t new_addr;
3408         u32 entry = *start;
3409         int i;
3410
3411         if (!new_skb) {
3412                 dev_kfree_skb(skb);
3413                 return -1;
3414         }
3415
3416         /* New SKB is guaranteed to be linear. */
3417         entry = *start;
3418         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3419                                   PCI_DMA_TODEVICE);
3420         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3421                     (skb->ip_summed == CHECKSUM_HW) ?
3422                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3423         *start = NEXT_TX(entry);
3424
3425         /* Now clean up the sw ring entries. */
3426         i = 0;
3427         while (entry != last_plus_one) {
3428                 int len;
3429
3430                 if (i == 0)
3431                         len = skb_headlen(skb);
3432                 else
3433                         len = skb_shinfo(skb)->frags[i-1].size;
3434                 pci_unmap_single(tp->pdev,
3435                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3436                                  len, PCI_DMA_TODEVICE);
3437                 if (i == 0) {
3438                         tp->tx_buffers[entry].skb = new_skb;
3439                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3440                 } else {
3441                         tp->tx_buffers[entry].skb = NULL;
3442                 }
3443                 entry = NEXT_TX(entry);
3444                 i++;
3445         }
3446
3447         dev_kfree_skb(skb);
3448
3449         return 0;
3450 }
3451
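/* Fill in one TX descriptor.  mss_and_is_end packs the "last descriptor
 * of this packet" flag in bit 0 and the TSO MSS in the remaining bits;
 * when TXD_FLAG_VLAN is set, the upper 16 bits of flags carry the VLAN
 * tag.
 */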
3452 static void tg3_set_txd(struct tg3 *tp, int entry,
3453                         dma_addr_t mapping, int len, u32 flags,
3454                         u32 mss_and_is_end)
3455 {
3456         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3457         int is_end = (mss_and_is_end & 0x1);
3458         u32 mss = (mss_and_is_end >> 1);
3459         u32 vlan_tag = 0;
3460
3461         if (is_end)
3462                 flags |= TXD_FLAG_END;
3463         if (flags & TXD_FLAG_VLAN) {
3464                 vlan_tag = flags >> 16;
3465                 flags &= 0xffff;
3466         }
3467         vlan_tag |= (mss << TXD_MSS_SHIFT);
3468
3469         txd->addr_hi = ((u64) mapping >> 32);
3470         txd->addr_lo = ((u64) mapping & 0xffffffff);
3471         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3472         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3473 }
3474
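/* Return true when a buffer of "len" bytes mapped at "mapping" would wrap
 * the low 32 bits of the DMA address, i.e. cross a 4GB boundary that the
 * buggy DMA engine (worked around above) cannot handle.  For example,
 * base = 0xffffe000 with len = 0x3000: base exceeds the 0xffffdcc0 guard
 * and base + len + 8 wraps to 0x1008 < base, so the test fires and the
 * packet is re-linearized in tg3_start_xmit().
 */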
3475 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3476 {
3477         u32 base = (u32) mapping & 0xffffffff;
3478
3479         return ((base > 0xffffdcc0) &&
3480                 (base + len + 8 < base));
3481 }
3482
3483 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3484 {
3485         struct tg3 *tp = netdev_priv(dev);
3486         dma_addr_t mapping;
3487         unsigned int i;
3488         u32 len, entry, base_flags, mss;
3489         int would_hit_hwbug;
3490
3491         len = skb_headlen(skb);
3492
3493         /* No BH disabling for tx_lock here.  We are running in BH disabled
3494          * context and TX reclaim runs via tp->poll inside of a software
3495          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3496          * no IRQ context deadlocks to worry about either.  Rejoice!
3497          */
3498         if (!spin_trylock(&tp->tx_lock))
3499                 return NETDEV_TX_LOCKED; 
3500
3501         /* This is a hard error, log it. */
3502         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3503                 netif_stop_queue(dev);
3504                 spin_unlock(&tp->tx_lock);
3505                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3506                        dev->name);
3507                 return NETDEV_TX_BUSY;
3508         }
3509
3510         entry = tp->tx_prod;
3511         base_flags = 0;
3512         if (skb->ip_summed == CHECKSUM_HW)
3513                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3514 #if TG3_TSO_SUPPORT != 0
3515         mss = 0;
3516         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3517             (mss = skb_shinfo(skb)->tso_size) != 0) {
3518                 int tcp_opt_len, ip_tcp_len;
3519
3520                 if (skb_header_cloned(skb) &&
3521                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3522                         dev_kfree_skb(skb);
3523                         goto out_unlock;
3524                 }
3525
3526                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3527                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3528
3529                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3530                                TXD_FLAG_CPU_POST_DMA);
3531
3532                 skb->nh.iph->check = 0;
3533                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
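                /* Chips with hardware TSO compute the TCP checksum entirely
                 * on their own, so it is simply cleared here.  Otherwise the
                 * pseudo-header checksum (seeded with a zero length) is
                 * stored in the header so the firmware only has to fold in
                 * each segment's length and payload.
                 */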
3534                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3535                         skb->h.th->check = 0;
3536                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3537                 }
3538                 else {
3539                         skb->h.th->check =
3540                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3541                                                    skb->nh.iph->daddr,
3542                                                    0, IPPROTO_TCP, 0);
3543                 }
3544
3545                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3546                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3547                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3548                                 int tsflags;
3549
3550                                 tsflags = ((skb->nh.iph->ihl - 5) +
3551                                            (tcp_opt_len >> 2));
3552                                 mss |= (tsflags << 11);
3553                         }
3554                 } else {
3555                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3556                                 int tsflags;
3557
3558                                 tsflags = ((skb->nh.iph->ihl - 5) +
3559                                            (tcp_opt_len >> 2));
3560                                 base_flags |= tsflags << 12;
3561                         }
3562                 }
3563         }
3564 #else
3565         mss = 0;
3566 #endif
3567 #if TG3_VLAN_TAG_USED
3568         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3569                 base_flags |= (TXD_FLAG_VLAN |
3570                                (vlan_tx_tag_get(skb) << 16));
3571 #endif
3572
3573         /* Queue skb data, a.k.a. the main skb fragment. */
3574         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3575
3576         tp->tx_buffers[entry].skb = skb;
3577         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3578
3579         would_hit_hwbug = 0;
3580
3581         if (tg3_4g_overflow_test(mapping, len))
3582                 would_hit_hwbug = entry + 1;
3583
3584         tg3_set_txd(tp, entry, mapping, len, base_flags,
3585                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3586
3587         entry = NEXT_TX(entry);
3588
3589         /* Now loop through additional data fragments, and queue them. */
3590         if (skb_shinfo(skb)->nr_frags > 0) {
3591                 unsigned int i, last;
3592
3593                 last = skb_shinfo(skb)->nr_frags - 1;
3594                 for (i = 0; i <= last; i++) {
3595                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3596
3597                         len = frag->size;
3598                         mapping = pci_map_page(tp->pdev,
3599                                                frag->page,
3600                                                frag->page_offset,
3601                                                len, PCI_DMA_TODEVICE);
3602
3603                         tp->tx_buffers[entry].skb = NULL;
3604                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3605
3606                         if (tg3_4g_overflow_test(mapping, len)) {
3607                                 /* Only one should match. */
3608                                 if (would_hit_hwbug)
3609                                         BUG();
3610                                 would_hit_hwbug = entry + 1;
3611                         }
3612
3613                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3614                                 tg3_set_txd(tp, entry, mapping, len,
3615                                             base_flags, (i == last)|(mss << 1));
3616                         else
3617                                 tg3_set_txd(tp, entry, mapping, len,
3618                                             base_flags, (i == last));
3619
3620                         entry = NEXT_TX(entry);
3621                 }
3622         }
3623
3624         if (would_hit_hwbug) {
3625                 u32 last_plus_one = entry;
3626                 u32 start;
3627                 unsigned int len = 0;
3628
3629                 would_hit_hwbug -= 1;
3630                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3631                 entry &= (TG3_TX_RING_SIZE - 1);
3632                 start = entry;
3633                 i = 0;
3634                 while (entry != last_plus_one) {
3635                         if (i == 0)
3636                                 len = skb_headlen(skb);
3637                         else
3638                                 len = skb_shinfo(skb)->frags[i-1].size;
3639
3640                         if (entry == would_hit_hwbug)
3641                                 break;
3642
3643                         i++;
3644                         entry = NEXT_TX(entry);
3645
3646                 }
3647
3648                 /* If the workaround fails due to memory/mapping
3649                  * failure, silently drop this packet.
3650                  */
3651                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3652                                                 entry, len,
3653                                                 last_plus_one,
3654                                                 &start, mss))
3655                         goto out_unlock;
3656
3657                 entry = start;
3658         }
3659
3660         /* Packets are ready, update Tx producer idx local and on card. */
3661         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3662
3663         tp->tx_prod = entry;
3664         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3665                 netif_stop_queue(dev);
3666
3667 out_unlock:
3668         mmiowb();
3669         spin_unlock(&tp->tx_lock);
3670
3671         dev->trans_start = jiffies;
3672
3673         return NETDEV_TX_OK;
3674 }
3675
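/* Update dev->mtu and the MTU-dependent flags.  A jumbo MTU normally
 * turns on the jumbo RX ring; the 5780 instead keeps the standard ring
 * (with larger buffers, see tg3_init_rings()) and turns TSO off, since
 * that chip apparently cannot do TSO and jumbo frames together.
 */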
3676 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3677                                int new_mtu)
3678 {
3679         dev->mtu = new_mtu;
3680
3681         if (new_mtu > ETH_DATA_LEN) {
3682                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3683                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3684                         ethtool_op_set_tso(dev, 0);
3685                 }
3686                 else
3687                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3688         } else {
3689                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3690                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3691                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3692         }
3693 }
3694
3695 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3696 {
3697         struct tg3 *tp = netdev_priv(dev);
3698
3699         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3700                 return -EINVAL;
3701
3702         if (!netif_running(dev)) {
3703                 /* We'll just catch it later when the
3704                  * device is up'd.
3705                  */
3706                 tg3_set_mtu(dev, tp, new_mtu);
3707                 return 0;
3708         }
3709
3710         tg3_netif_stop(tp);
3711
3712         tg3_full_lock(tp, 1);
3713
3714         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3715
3716         tg3_set_mtu(dev, tp, new_mtu);
3717
3718         tg3_init_hw(tp);
3719
3720         tg3_netif_start(tp);
3721
3722         tg3_full_unlock(tp);
3723
3724         return 0;
3725 }
3726
3727 /* Free up pending packets in all rx/tx rings.
3728  *
3729  * The chip has been shut down and the driver detached from
3730  * the networking stack, so no interrupts or new tx packets will
3731  * end up in the driver.  tp->{tx,}lock is not held and we are not
3732  * in an interrupt context and thus may sleep.
3733  */
3734 static void tg3_free_rings(struct tg3 *tp)
3735 {
3736         struct ring_info *rxp;
3737         int i;
3738
3739         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3740                 rxp = &tp->rx_std_buffers[i];
3741
3742                 if (rxp->skb == NULL)
3743                         continue;
3744                 pci_unmap_single(tp->pdev,
3745                                  pci_unmap_addr(rxp, mapping),
3746                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3747                                  PCI_DMA_FROMDEVICE);
3748                 dev_kfree_skb_any(rxp->skb);
3749                 rxp->skb = NULL;
3750         }
3751
3752         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3753                 rxp = &tp->rx_jumbo_buffers[i];
3754
3755                 if (rxp->skb == NULL)
3756                         continue;
3757                 pci_unmap_single(tp->pdev,
3758                                  pci_unmap_addr(rxp, mapping),
3759                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3760                                  PCI_DMA_FROMDEVICE);
3761                 dev_kfree_skb_any(rxp->skb);
3762                 rxp->skb = NULL;
3763         }
3764
3765         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3766                 struct tx_ring_info *txp;
3767                 struct sk_buff *skb;
3768                 int j;
3769
3770                 txp = &tp->tx_buffers[i];
3771                 skb = txp->skb;
3772
3773                 if (skb == NULL) {
3774                         i++;
3775                         continue;
3776                 }
3777
3778                 pci_unmap_single(tp->pdev,
3779                                  pci_unmap_addr(txp, mapping),
3780                                  skb_headlen(skb),
3781                                  PCI_DMA_TODEVICE);
3782                 txp->skb = NULL;
3783
3784                 i++;
3785
3786                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3787                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3788                         pci_unmap_page(tp->pdev,
3789                                        pci_unmap_addr(txp, mapping),
3790                                        skb_shinfo(skb)->frags[j].size,
3791                                        PCI_DMA_TODEVICE);
3792                         i++;
3793                 }
3794
3795                 dev_kfree_skb_any(skb);
3796         }
3797 }
3798
3799 /* Initialize tx/rx rings for packet processing.
3800  *
3801  * The chip has been shut down and the driver detached from
3802  * the networking stack, so no interrupts or new tx packets will
3803  * end up in the driver.  tp->{tx,}lock are held and thus
3804  * we may not sleep.
3805  */
3806 static void tg3_init_rings(struct tg3 *tp)
3807 {
3808         u32 i;
3809
3810         /* Free up all the SKBs. */
3811         tg3_free_rings(tp);
3812
3813         /* Zero out all descriptors. */
3814         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3815         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3816         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3817         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3818
3819         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3820         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3821             (tp->dev->mtu > ETH_DATA_LEN))
3822                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3823
3824         /* Initialize invariants of the rings, we only set this
3825          * stuff once.  This works because the card does not
3826          * write into the rx buffer posting rings.
3827          */
3828         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3829                 struct tg3_rx_buffer_desc *rxd;
3830
3831                 rxd = &tp->rx_std[i];
3832                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3833                         << RXD_LEN_SHIFT;
3834                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3835                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3836                                (i << RXD_OPAQUE_INDEX_SHIFT));
3837         }
3838
3839         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3840                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3841                         struct tg3_rx_buffer_desc *rxd;
3842
3843                         rxd = &tp->rx_jumbo[i];
3844                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3845                                 << RXD_LEN_SHIFT;
3846                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3847                                 RXD_FLAG_JUMBO;
3848                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3849                                (i << RXD_OPAQUE_INDEX_SHIFT));
3850                 }
3851         }
3852
3853         /* Now allocate fresh SKBs for each rx ring. */
3854         for (i = 0; i < tp->rx_pending; i++) {
3855                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3856                                      -1, i) < 0)
3857                         break;
3858         }
3859
3860         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3861                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3862                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3863                                              -1, i) < 0)
3864                                 break;
3865                 }
3866         }
3867 }
3868
3869 /*
3870  * Must not be invoked with interrupt sources disabled and
3871  * the hardware shut down.
3872  */
3873 static void tg3_free_consistent(struct tg3 *tp)
3874 {
3875         if (tp->rx_std_buffers) {
3876                 kfree(tp->rx_std_buffers);
3877                 tp->rx_std_buffers = NULL;
3878         }
3879         if (tp->rx_std) {
3880                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3881                                     tp->rx_std, tp->rx_std_mapping);
3882                 tp->rx_std = NULL;
3883         }
3884         if (tp->rx_jumbo) {
3885                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3886                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3887                 tp->rx_jumbo = NULL;
3888         }
3889         if (tp->rx_rcb) {
3890                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3891                                     tp->rx_rcb, tp->rx_rcb_mapping);
3892                 tp->rx_rcb = NULL;
3893         }
3894         if (tp->tx_ring) {
3895                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3896                         tp->tx_ring, tp->tx_desc_mapping);
3897                 tp->tx_ring = NULL;
3898         }
3899         if (tp->hw_status) {
3900                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3901                                     tp->hw_status, tp->status_mapping);
3902                 tp->hw_status = NULL;
3903         }
3904         if (tp->hw_stats) {
3905                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3906                                     tp->hw_stats, tp->stats_mapping);
3907                 tp->hw_stats = NULL;
3908         }
3909 }
3910
3911 /*
3912  * Must not be invoked with interrupt sources disabled and
3913  * the hardware shut down.  Can sleep.
3914  */
3915 static int tg3_alloc_consistent(struct tg3 *tp)
3916 {
3917         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3918                                       (TG3_RX_RING_SIZE +
3919                                        TG3_RX_JUMBO_RING_SIZE)) +
3920                                      (sizeof(struct tx_ring_info) *
3921                                       TG3_TX_RING_SIZE),
3922                                      GFP_KERNEL);
3923         if (!tp->rx_std_buffers)
3924                 return -ENOMEM;
3925
3926         memset(tp->rx_std_buffers, 0,
3927                (sizeof(struct ring_info) *
3928                 (TG3_RX_RING_SIZE +
3929                  TG3_RX_JUMBO_RING_SIZE)) +
3930                (sizeof(struct tx_ring_info) *
3931                 TG3_TX_RING_SIZE));
3932
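        /* The single allocation above is carved into three consecutive
         * arrays: standard RX ring_info entries, jumbo RX ring_info
         * entries, then the TX tx_ring_info entries.
         */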
3933         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3934         tp->tx_buffers = (struct tx_ring_info *)
3935                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3936
3937         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3938                                           &tp->rx_std_mapping);
3939         if (!tp->rx_std)
3940                 goto err_out;
3941
3942         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3943                                             &tp->rx_jumbo_mapping);
3944
3945         if (!tp->rx_jumbo)
3946                 goto err_out;
3947
3948         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3949                                           &tp->rx_rcb_mapping);
3950         if (!tp->rx_rcb)
3951                 goto err_out;
3952
3953         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3954                                            &tp->tx_desc_mapping);
3955         if (!tp->tx_ring)
3956                 goto err_out;
3957
3958         tp->hw_status = pci_alloc_consistent(tp->pdev,
3959                                              TG3_HW_STATUS_SIZE,
3960                                              &tp->status_mapping);
3961         if (!tp->hw_status)
3962                 goto err_out;
3963
3964         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3965                                             sizeof(struct tg3_hw_stats),
3966                                             &tp->stats_mapping);
3967         if (!tp->hw_stats)
3968                 goto err_out;
3969
3970         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3971         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3972
3973         return 0;
3974
3975 err_out:
3976         tg3_free_consistent(tp);
3977         return -ENOMEM;
3978 }
3979
3980 #define MAX_WAIT_CNT 1000
3981
3982 /* To stop a block, clear the enable bit and poll till it
3983  * clears.  tp->lock is held.
3984  */
3985 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3986 {
3987         unsigned int i;
3988         u32 val;
3989
3990         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3991                 switch (ofs) {
3992                 case RCVLSC_MODE:
3993                 case DMAC_MODE:
3994                 case MBFREE_MODE:
3995                 case BUFMGR_MODE:
3996                 case MEMARB_MODE:
3997                         /* We can't enable/disable these bits of the
3998                          * 5705/5750, just say success.
3999                          */
4000                         return 0;
4001
4002                 default:
4003                         break;
4004                 };
4005         }
4006
4007         val = tr32(ofs);
4008         val &= ~enable_bit;
4009         tw32_f(ofs, val);
4010
4011         for (i = 0; i < MAX_WAIT_CNT; i++) {
4012                 udelay(100);
4013                 val = tr32(ofs);
4014                 if ((val & enable_bit) == 0)
4015                         break;
4016         }
4017
4018         if (i == MAX_WAIT_CNT && !silent) {
4019                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4020                        "ofs=%lx enable_bit=%x\n",
4021                        ofs, enable_bit);
4022                 return -ENODEV;
4023         }
4024
4025         return 0;
4026 }
4027
4028 /* tp->lock is held. */
4029 static int tg3_abort_hw(struct tg3 *tp, int silent)
4030 {
4031         int i, err;
4032
4033         tg3_disable_ints(tp);
4034
4035         tp->rx_mode &= ~RX_MODE_ENABLE;
4036         tw32_f(MAC_RX_MODE, tp->rx_mode);
4037         udelay(10);
4038
4039         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4040         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4041         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4042         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4043         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4044         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4045
4046         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4047         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4048         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4049         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4050         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4051         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4052         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4053
4054         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4055         tw32_f(MAC_MODE, tp->mac_mode);
4056         udelay(40);
4057
4058         tp->tx_mode &= ~TX_MODE_ENABLE;
4059         tw32_f(MAC_TX_MODE, tp->tx_mode);
4060
4061         for (i = 0; i < MAX_WAIT_CNT; i++) {
4062                 udelay(100);
4063                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4064                         break;
4065         }
4066         if (i >= MAX_WAIT_CNT) {
4067                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4068                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4069                        tp->dev->name, tr32(MAC_TX_MODE));
4070                 err |= -ENODEV;
4071         }
4072
4073         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4074         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4075         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4076
4077         tw32(FTQ_RESET, 0xffffffff);
4078         tw32(FTQ_RESET, 0x00000000);
4079
4080         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4081         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4082
4083         if (tp->hw_status)
4084                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4085         if (tp->hw_stats)
4086                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4087
4088         return err;
4089 }
4090
4091 /* tp->lock is held. */
4092 static int tg3_nvram_lock(struct tg3 *tp)
4093 {
4094         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4095                 int i;
4096
4097                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
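                /* Poll for the arbitration grant; 8000 iterations of 20us
                 * gives the firmware roughly 160ms to release the NVRAM.
                 */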
4098                 for (i = 0; i < 8000; i++) {
4099                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4100                                 break;
4101                         udelay(20);
4102                 }
4103                 if (i == 8000)
4104                         return -ENODEV;
4105         }
4106         return 0;
4107 }
4108
4109 /* tp->lock is held. */
4110 static void tg3_nvram_unlock(struct tg3 *tp)
4111 {
4112         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4113                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4114 }
4115
4116 /* tp->lock is held. */
4117 static void tg3_enable_nvram_access(struct tg3 *tp)
4118 {
4119         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4120             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4121                 u32 nvaccess = tr32(NVRAM_ACCESS);
4122
4123                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4124         }
4125 }
4126
4127 /* tp->lock is held. */
4128 static void tg3_disable_nvram_access(struct tg3 *tp)
4129 {
4130         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4131             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4132                 u32 nvaccess = tr32(NVRAM_ACCESS);
4133
4134                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4135         }
4136 }
4137
4138 /* tp->lock is held. */
4139 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4140 {
4141         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4142                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4143                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4144
4145         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4146                 switch (kind) {
4147                 case RESET_KIND_INIT:
4148                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4149                                       DRV_STATE_START);
4150                         break;
4151
4152                 case RESET_KIND_SHUTDOWN:
4153                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4154                                       DRV_STATE_UNLOAD);
4155                         break;
4156
4157                 case RESET_KIND_SUSPEND:
4158                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4159                                       DRV_STATE_SUSPEND);
4160                         break;
4161
4162                 default:
4163                         break;
4164                 }
4165         }
4166 }
4167
4168 /* tp->lock is held. */
4169 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4170 {
4171         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4172                 switch (kind) {
4173                 case RESET_KIND_INIT:
4174                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4175                                       DRV_STATE_START_DONE);
4176                         break;
4177
4178                 case RESET_KIND_SHUTDOWN:
4179                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4180                                       DRV_STATE_UNLOAD_DONE);
4181                         break;
4182
4183                 default:
4184                         break;
4185                 }
4186         }
4187 }
4188
4189 /* tp->lock is held. */
4190 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4191 {
4192         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4193                 switch (kind) {
4194                 case RESET_KIND_INIT:
4195                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4196                                       DRV_STATE_START);
4197                         break;
4198
4199                 case RESET_KIND_SHUTDOWN:
4200                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4201                                       DRV_STATE_UNLOAD);
4202                         break;
4203
4204                 case RESET_KIND_SUSPEND:
4205                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4206                                       DRV_STATE_SUSPEND);
4207                         break;
4208
4209                 default:
4210                         break;
4211                 }
4212         }
4213 }
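
/* The three signature helpers above implement the driver-state handshake
 * with the ASF/boot firmware around a chip reset.  tg3_halt() below uses
 * them in this order (condensed sketch; tg3_stop_fw(), tg3_abort_hw()
 * and error handling omitted):
 *
 *        tg3_write_sig_pre_reset(tp, kind);
 *        tg3_chip_reset(tp);
 *        tg3_write_sig_legacy(tp, kind);
 *        tg3_write_sig_post_reset(tp, kind);
 */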
4214
4215 static void tg3_stop_fw(struct tg3 *);
4216
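/* Rough outline of tg3_chip_reset() below: grab the NVRAM arbitration
 * lock, issue GRC_MISC_CFG_CORECLK_RESET, delay and flush the posted
 * write with a PCI config read, restore PCI/PCI-X/PCIe state, re-enable
 * the memory arbiter, restore MAC_MODE, wait for the boot firmware to
 * post ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the firmware mailbox, and then
 * re-probe the ASF configuration from NIC SRAM.
 */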
4217 /* tp->lock is held. */
4218 static int tg3_chip_reset(struct tg3 *tp)
4219 {
4220         u32 val;
4221         void (*write_op)(struct tg3 *, u32, u32);
4222         int i;
4223
4224         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4225                 tg3_nvram_lock(tp);
4226
4227         /*
4228          * We must avoid the readl() that normally takes place.
4229          * It locks up machines, causes machine checks, and does
4230          * other fun things.  So, temporarily disable the 5701
4231          * hardware workaround while we do the reset.
4232          */
4233         write_op = tp->write32;
4234         if (write_op == tg3_write_flush_reg32)
4235                 tp->write32 = tg3_write32;
4236
4237         /* do the reset */
4238         val = GRC_MISC_CFG_CORECLK_RESET;
4239
4240         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4241                 if (tr32(0x7e2c) == 0x60) {
4242                         tw32(0x7e2c, 0x20);
4243                 }
4244                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4245                         tw32(GRC_MISC_CFG, (1 << 29));
4246                         val |= (1 << 29);
4247                 }
4248         }
4249
4250         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4251                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4252         tw32(GRC_MISC_CFG, val);
4253
4254         /* restore 5701 hardware bug workaround write method */
4255         tp->write32 = write_op;
4256
4257         /* Unfortunately, we have to delay before the PCI read back.
4258          * Some 575X chips will not even respond to a PCI cfg access
4259          * when the reset command is given to the chip.
4260          *
4261          * How do these hardware designers expect things to work
4262          * properly if the PCI write is posted for a long period
4263          * of time?  It is always necessary to have some method by
4264          * which a register read back can occur to push out the
4265          * write which does the reset.
4266          *
4267          * For most tg3 variants the trick below works.
4268          * Ho hum...
4269          */
4270         udelay(120);
4271
4272         /* Flush PCI posted writes.  The normal MMIO registers
4273          * are inaccessible at this time so this is the only
4274          * way to do this reliably (actually, this is no longer
4275          * the case, see above).  I tried to use indirect
4276          * register read/write but this upset some 5701 variants.
4277          */
4278         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4279
4280         udelay(120);
4281
4282         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4283                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4284                         int i;
4285                         u32 cfg_val;
4286
4287                         /* Wait for link training to complete.  */
4288                         for (i = 0; i < 5000; i++)
4289                                 udelay(100);
4290
4291                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4292                         pci_write_config_dword(tp->pdev, 0xc4,
4293                                                cfg_val | (1 << 15));
4294                 }
4295                 /* Set PCIE max payload size and clear error status.  */
4296                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4297         }
4298
4299         /* Re-enable indirect register accesses. */
4300         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4301                                tp->misc_host_ctrl);
4302
4303         /* Set MAX PCI retry to zero. */
4304         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4305         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4306             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4307                 val |= PCISTATE_RETRY_SAME_DMA;
4308         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4309
4310         pci_restore_state(tp->pdev);
4311
4312         /* Make sure PCI-X relaxed ordering bit is clear. */
4313         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4314         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4315         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4316
4317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4318                 u32 val;
4319
4320                 /* Chip reset on 5780 will reset the MSI enable bit,
4321                  * so we need to restore it.
4322                  */
4323                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4324                         u16 ctrl;
4325
4326                         pci_read_config_word(tp->pdev,
4327                                              tp->msi_cap + PCI_MSI_FLAGS,
4328                                              &ctrl);
4329                         pci_write_config_word(tp->pdev,
4330                                               tp->msi_cap + PCI_MSI_FLAGS,
4331                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4332                         val = tr32(MSGINT_MODE);
4333                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4334                 }
4335
4336                 val = tr32(MEMARB_MODE);
4337                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4338
4339         } else
4340                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4341
4342         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4343                 tg3_stop_fw(tp);
4344                 tw32(0x5000, 0x400);
4345         }
4346
4347         tw32(GRC_MODE, tp->grc_mode);
4348
4349         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4350                 u32 val = tr32(0xc4);
4351
4352                 tw32(0xc4, val | (1 << 15));
4353         }
4354
4355         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4356             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4357                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4358                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4359                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4360                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4361         }
4362
4363         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4364                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4365                 tw32_f(MAC_MODE, tp->mac_mode);
4366         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4367                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4368                 tw32_f(MAC_MODE, tp->mac_mode);
4369         } else
4370                 tw32_f(MAC_MODE, 0);
4371         udelay(40);
4372
4373         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4374                 /* Wait for firmware initialization to complete. */
4375                 for (i = 0; i < 100000; i++) {
4376                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4377                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4378                                 break;
4379                         udelay(10);
4380                 }
4381                 if (i >= 100000) {
4382                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4383                                "firmware will not restart, magic=%08x\n",
4384                                tp->dev->name, val);
4385                         return -ENODEV;
4386                 }
4387         }
4388
4389         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4390             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4391                 u32 val = tr32(0x7c00);
4392
4393                 tw32(0x7c00, val | (1 << 25));
4394         }
4395
4396         /* Reprobe ASF enable state.  */
4397         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4398         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4399         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4400         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4401                 u32 nic_cfg;
4402
4403                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4404                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4405                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4406                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4407                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4408                 }
4409         }
4410
4411         return 0;
4412 }
4413
4414 /* tp->lock is held. */
4415 static void tg3_stop_fw(struct tg3 *tp)
4416 {
4417         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4418                 u32 val;
4419                 int i;
4420
4421                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4422                 val = tr32(GRC_RX_CPU_EVENT);
4423                 val |= (1 << 14);
4424                 tw32(GRC_RX_CPU_EVENT, val);
4425
4426                 /* Wait for RX cpu to ACK the event.  */
4427                 for (i = 0; i < 100; i++) {
4428                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4429                                 break;
4430                         udelay(1);
4431                 }
4432         }
4433 }
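
/* Condensed view of the handshake above: the driver posts
 * FWCMD_NICDRV_PAUSE_FW in the firmware command mailbox, raises bit 14
 * of GRC_RX_CPU_EVENT to signal the RX CPU firmware, then polls for up
 * to ~100 usec for the firmware to clear that bit as its ACK.
 */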
4434
4435 /* tp->lock is held. */
4436 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4437 {
4438         int err;
4439
4440         tg3_stop_fw(tp);
4441
4442         tg3_write_sig_pre_reset(tp, kind);
4443
4444         tg3_abort_hw(tp, silent);
4445         err = tg3_chip_reset(tp);
4446
4447         tg3_write_sig_legacy(tp, kind);
4448         tg3_write_sig_post_reset(tp, kind);
4449
4450         if (err)
4451                 return err;
4452
4453         return 0;
4454 }
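
/* Minimal usage sketch (illustrative only, locking elided): a shutdown
 * path would call
 *
 *        err = tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *
 * with tp->lock held; the third argument ("silent") is simply forwarded
 * to tg3_abort_hw() and, from there, to tg3_stop_block().
 */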
4455
4456 #define TG3_FW_RELEASE_MAJOR    0x0
4457 #define TG3_FW_RELEASE_MINOR    0x0
4458 #define TG3_FW_RELEASE_FIX      0x0
4459 #define TG3_FW_START_ADDR       0x08000000
4460 #define TG3_FW_TEXT_ADDR        0x08000000
4461 #define TG3_FW_TEXT_LEN         0x9c0
4462 #define TG3_FW_RODATA_ADDR      0x080009c0
4463 #define TG3_FW_RODATA_LEN       0x60
4464 #define TG3_FW_DATA_ADDR        0x08000a40
4465 #define TG3_FW_DATA_LEN         0x20
4466 #define TG3_FW_SBSS_ADDR        0x08000a60
4467 #define TG3_FW_SBSS_LEN         0xc
4468 #define TG3_FW_BSS_ADDR         0x08000a70
4469 #define TG3_FW_BSS_LEN          0x10
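
/* These constants describe the layout of the on-chip CPU firmware image
 * below: .text, .rodata and .data live in one flat address space starting
 * at TG3_FW_START_ADDR, and tg3_load_firmware_cpu() uses only the low 16
 * bits of each *_ADDR value (the "& 0xffff" masking) as the offset into
 * the CPU scratch memory.
 */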
4470
4471 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4472         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4473         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4474         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4475         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4476         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4477         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4478         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4479         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4480         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4481         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4482         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4483         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4484         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4485         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4486         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4487         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4488         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4489         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4490         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4491         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4492         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4493         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4494         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4495         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4496         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4497         0, 0, 0, 0, 0, 0,
4498         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4499         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4500         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4501         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4502         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4503         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4504         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4505         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4506         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4507         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4508         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4509         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4510         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4511         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4512         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4513         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4514         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4515         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4516         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4517         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4518         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4519         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4520         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4521         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4522         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4523         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4524         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4525         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4526         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4527         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4528         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4529         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4530         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4531         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4532         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4533         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4534         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4535         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4536         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4537         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4538         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4539         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4540         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4541         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4542         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4543         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4544         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4545         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4546         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4547         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4548         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4549         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4550         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4551         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4552         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4553         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4554         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4555         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4556         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4557         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4558         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4559         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4560         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4561         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4562         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4563 };
4564
4565 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4566         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4567         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4568         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4569         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4570         0x00000000
4571 };
4572
4573 #if 0 /* All zeros, don't eat up space with it. */
4574 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4575         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4576         0x00000000, 0x00000000, 0x00000000, 0x00000000
4577 };
4578 #endif
4579
4580 #define RX_CPU_SCRATCH_BASE     0x30000
4581 #define RX_CPU_SCRATCH_SIZE     0x04000
4582 #define TX_CPU_SCRATCH_BASE     0x34000
4583 #define TX_CPU_SCRATCH_SIZE     0x04000
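
/* Each on-chip CPU has a 16 kB (0x4000 byte) scratch window.  The loader
 * below zeroes the whole window and then copies the firmware sections
 * into it while the CPU is held in CPU_MODE_HALT.
 */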
4584
4585 /* tp->lock is held. */
4586 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4587 {
4588         int i;
4589
4590         if (offset == TX_CPU_BASE &&
4591             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4592                 BUG();
4593
4594         if (offset == RX_CPU_BASE) {
4595                 for (i = 0; i < 10000; i++) {
4596                         tw32(offset + CPU_STATE, 0xffffffff);
4597                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4598                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4599                                 break;
4600                 }
4601
4602                 tw32(offset + CPU_STATE, 0xffffffff);
4603                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4604                 udelay(10);
4605         } else {
4606                 for (i = 0; i < 10000; i++) {
4607                         tw32(offset + CPU_STATE, 0xffffffff);
4608                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4609                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4610                                 break;
4611                 }
4612         }
4613
4614         if (i >= 10000) {
4615                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4616                        "and %s CPU\n",
4617                        tp->dev->name,
4618                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4619                 return -ENODEV;
4620         }
4621         return 0;
4622 }
4623
4624 struct fw_info {
4625         unsigned int text_base;
4626         unsigned int text_len;
4627         u32 *text_data;
4628         unsigned int rodata_base;
4629         unsigned int rodata_len;
4630         u32 *rodata_data;
4631         unsigned int data_base;
4632         unsigned int data_len;
4633         u32 *data_data;
4634 };
4635
4636 /* tp->lock is held. */
4637 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4638                                  int cpu_scratch_size, struct fw_info *info)
4639 {
4640         int err, i;
4641         void (*write_op)(struct tg3 *, u32, u32);
4642
4643         if (cpu_base == TX_CPU_BASE &&
4644             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4645                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4646                        "TX cpu firmware on %s which is 5705.\n",
4647                        tp->dev->name);
4648                 return -EINVAL;
4649         }
4650
4651         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4652                 write_op = tg3_write_mem;
4653         else
4654                 write_op = tg3_write_indirect_reg32;
4655
4656         /* It is possible that bootcode is still loading at this
4657          * point.  Get the nvram lock before halting the cpu.
4658          */
4659         tg3_nvram_lock(tp);
4660         err = tg3_halt_cpu(tp, cpu_base);
4661         tg3_nvram_unlock(tp);
4662         if (err)
4663                 goto out;
4664
4665         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4666                 write_op(tp, cpu_scratch_base + i, 0);
4667         tw32(cpu_base + CPU_STATE, 0xffffffff);
4668         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4669         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4670                 write_op(tp, (cpu_scratch_base +
4671                               (info->text_base & 0xffff) +
4672                               (i * sizeof(u32))),
4673                          (info->text_data ?
4674                           info->text_data[i] : 0));
4675         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4676                 write_op(tp, (cpu_scratch_base +
4677                               (info->rodata_base & 0xffff) +
4678                               (i * sizeof(u32))),
4679                          (info->rodata_data ?
4680                           info->rodata_data[i] : 0));
4681         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4682                 write_op(tp, (cpu_scratch_base +
4683                               (info->data_base & 0xffff) +
4684                               (i * sizeof(u32))),
4685                          (info->data_data ?
4686                           info->data_data[i] : 0));
4687
4688         err = 0;
4689
4690 out:
4691         return err;
4692 }
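
/* Note that tg3_load_firmware_cpu() leaves the target CPU halted; it is
 * up to the caller to point CPU_PC at the firmware entry address and
 * clear CPU_MODE_HALT, as tg3_load_5701_a0_firmware_fix() does below for
 * the RX CPU.
 */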
4693
4694 /* tp->lock is held. */
4695 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4696 {
4697         struct fw_info info;
4698         int err, i;
4699
4700         info.text_base = TG3_FW_TEXT_ADDR;
4701         info.text_len = TG3_FW_TEXT_LEN;
4702         info.text_data = &tg3FwText[0];
4703         info.rodata_base = TG3_FW_RODATA_ADDR;
4704         info.rodata_len = TG3_FW_RODATA_LEN;
4705         info.rodata_data = &tg3FwRodata[0];
4706         info.data_base = TG3_FW_DATA_ADDR;
4707         info.data_len = TG3_FW_DATA_LEN;
4708         info.data_data = NULL;
4709
4710         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4711                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4712                                     &info);
4713         if (err)
4714                 return err;
4715
4716         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4717                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4718                                     &info);
4719         if (err)
4720                 return err;
4721
4722         /* Now start up only the RX cpu. */
4723         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4724         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4725
4726         for (i = 0; i < 5; i++) {
4727                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4728                         break;
4729                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4730                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4731                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4732                 udelay(1000);
4733         }
4734         if (i >= 5) {
4735                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4736                        "to set RX CPU PC, is %08x, should be %08x\n",
4737                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4738                        TG3_FW_TEXT_ADDR);
4739                 return -ENODEV;
4740         }
4741         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4742         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4743
4744         return 0;
4745 }
4746
4747 #if TG3_TSO_SUPPORT != 0
4748
4749 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4750 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4751 #define TG3_TSO_FW_RELEASE_FIX          0x0
4752 #define TG3_TSO_FW_START_ADDR           0x08000000
4753 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4754 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4755 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4756 #define TG3_TSO_FW_RODATA_LEN           0x60
4757 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4758 #define TG3_TSO_FW_DATA_LEN             0x30
4759 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4760 #define TG3_TSO_FW_SBSS_LEN             0x2c
4761 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4762 #define TG3_TSO_FW_BSS_LEN              0x894
4763
4764 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4765         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4766         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4767         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4768         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4769         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4770         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4771         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4772         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4773         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4774         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4775         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4776         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4777         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4778         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4779         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4780         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4781         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4782         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4783         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4784         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4785         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4786         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4787         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4788         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4789         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4790         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4791         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4792         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4793         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4794         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4795         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4796         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4797         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4798         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4799         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4800         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4801         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4802         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4803         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4804         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4805         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4806         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4807         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4808         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4809         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4810         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4811         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4812         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4813         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4814         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4815         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4816         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4817         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4818         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4819         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4820         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4821         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4822         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4823         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4824         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4825         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4826         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4827         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4828         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4829         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4830         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4831         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4832         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4833         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4834         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4835         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4836         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4837         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4838         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4839         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4840         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4841         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4842         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4843         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4844         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4845         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4846         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4847         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4848         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4849         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4850         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4851         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4852         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4853         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4854         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4855         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4856         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4857         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4858         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4859         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4860         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4861         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4862         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4863         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4864         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4865         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4866         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4867         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4868         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4869         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4870         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4871         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4872         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4873         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4874         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4875         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4876         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4877         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4878         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4879         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4880         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4881         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4882         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4883         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4884         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4885         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4886         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4887         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4888         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4889         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4890         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4891         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4892         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4893         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4894         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4895         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4896         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4897         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4898         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4899         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4900         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4901         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4902         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4903         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4904         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4905         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4906         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4907         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4908         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4909         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4910         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4911         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4912         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4913         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4914         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4915         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4916         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4917         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4918         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4919         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4920         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4921         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4922         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4923         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4924         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4925         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4926         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4927         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4928         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4929         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4930         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4931         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4932         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4933         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4934         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4935         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4936         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4937         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4938         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4939         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4940         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4941         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4942         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4943         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4944         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4945         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4946         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4947         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4948         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4949         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4950         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4951         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4952         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4953         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4954         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4955         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4956         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4957         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4958         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4959         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4960         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4961         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4962         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4963         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4964         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4965         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4966         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4967         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4968         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4969         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4970         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4971         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4972         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4973         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4974         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4975         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4976         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4977         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4978         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4979         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4980         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4981         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4982         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4983         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4984         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4985         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4986         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4987         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4988         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4989         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4990         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4991         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4992         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4993         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4994         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4995         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4996         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4997         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4998         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4999         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5000         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5001         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5002         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5003         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5004         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5005         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5006         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5007         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5008         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5009         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5010         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5011         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5012         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5013         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5014         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5015         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5016         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5017         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5018         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5019         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5020         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5021         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5022         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5023         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5024         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5025         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5026         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5027         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5028         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5029         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5030         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5031         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5032         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5033         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5034         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5035         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5036         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5037         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5038         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5039         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5040         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5041         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5042         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5043         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5044         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5045         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5046         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5047         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5048         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5049 };
5050
5051 static u32 tg3TsoFwRodata[] = {
5052         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5053         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5054         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5055         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5056         0x00000000,
5057 };
5058
5059 static u32 tg3TsoFwData[] = {
5060         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5061         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5062         0x00000000,
5063 };
5064
5065 /* 5705 needs a special version of the TSO firmware.  */
5066 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5067 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5068 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5069 #define TG3_TSO5_FW_START_ADDR          0x00010000
5070 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5071 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5072 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5073 #define TG3_TSO5_FW_RODATA_LEN          0x50
5074 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5075 #define TG3_TSO5_FW_DATA_LEN            0x20
5076 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5077 #define TG3_TSO5_FW_SBSS_LEN            0x28
5078 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5079 #define TG3_TSO5_FW_BSS_LEN             0x88
5080
5081 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5082         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5083         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5084         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5085         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5086         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5087         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5088         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5089         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5090         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5091         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5092         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5093         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5094         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5095         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5096         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5097         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5098         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5099         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5100         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5101         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5102         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5103         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5104         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5105         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5106         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5107         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5108         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5109         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5110         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5111         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5112         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5113         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5114         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5115         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5116         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5117         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5118         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5119         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5120         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5121         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5122         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5123         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5124         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5125         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5126         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5127         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5128         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5129         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5130         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5131         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5132         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5133         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5134         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5135         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5136         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5137         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5138         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5139         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5140         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5141         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5142         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5143         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5144         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5145         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5146         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5147         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5148         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5149         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5150         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5151         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5152         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5153         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5154         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5155         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5156         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5157         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5158         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5159         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5160         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5161         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5162         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5163         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5164         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5165         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5166         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5167         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5168         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5169         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5170         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5171         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5172         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5173         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5174         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5175         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5176         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5177         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5178         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5179         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5180         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5181         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5182         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5183         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5184         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5185         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5186         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5187         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5188         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5189         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5190         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5191         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5192         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5193         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5194         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5195         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5196         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5197         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5198         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5199         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5200         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5201         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5202         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5203         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5204         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5205         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5206         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5207         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5208         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5209         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5210         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5211         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5212         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5213         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5214         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5215         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5216         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5217         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5218         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5219         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5220         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5221         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5222         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5223         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5224         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5225         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5226         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5227         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5228         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5229         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5230         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5231         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5232         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5233         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5234         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5235         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5236         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5237         0x00000000, 0x00000000, 0x00000000,
5238 };
5239
5240 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5241         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5242         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5243         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5244         0x00000000, 0x00000000, 0x00000000,
5245 };
5246
5247 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5248         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5249         0x00000000, 0x00000000, 0x00000000,
5250 };
5251
5252 /* tp->lock is held. */
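/* Descriptive summary: pick the 5705-specific or the generic TSO firmware
 * image, copy its text/rodata/data sections into the chosen CPU scratch
 * space with tg3_load_firmware_cpu(), then point the on-chip CPU program
 * counter at the firmware entry and release the CPU from halt.
 */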
5253 static int tg3_load_tso_firmware(struct tg3 *tp)
5254 {
5255         struct fw_info info;
5256         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5257         int err, i;
5258
5259         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5260                 return 0;
5261
5262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5263                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5264                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5265                 info.text_data = &tg3Tso5FwText[0];
5266                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5267                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5268                 info.rodata_data = &tg3Tso5FwRodata[0];
5269                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5270                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5271                 info.data_data = &tg3Tso5FwData[0];
5272                 cpu_base = RX_CPU_BASE;
5273                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5274                 cpu_scratch_size = (info.text_len +
5275                                     info.rodata_len +
5276                                     info.data_len +
5277                                     TG3_TSO5_FW_SBSS_LEN +
5278                                     TG3_TSO5_FW_BSS_LEN);
5279         } else {
5280                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5281                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5282                 info.text_data = &tg3TsoFwText[0];
5283                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5284                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5285                 info.rodata_data = &tg3TsoFwRodata[0];
5286                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5287                 info.data_len = TG3_TSO_FW_DATA_LEN;
5288                 info.data_data = &tg3TsoFwData[0];
5289                 cpu_base = TX_CPU_BASE;
5290                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5291                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5292         }
5293
5294         err = tg3_load_firmware_cpu(tp, cpu_base,
5295                                     cpu_scratch_base, cpu_scratch_size,
5296                                     &info);
5297         if (err)
5298                 return err;
5299
5300         /* Now start up the CPU. */
5301         tw32(cpu_base + CPU_STATE, 0xffffffff);
5302         tw32_f(cpu_base + CPU_PC,    info.text_base);
5303
5304         for (i = 0; i < 5; i++) {
5305                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5306                         break;
5307                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5308                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5309                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5310                 udelay(1000);
5311         }
5312         if (i >= 5) {
5313                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5314                        "CPU PC for %s: is %08x, should be %08x\n",
5315                        tp->dev->name, tr32(cpu_base + CPU_PC),
5316                        info.text_base);
5317                 return -ENODEV;
5318         }
5319         tw32(cpu_base + CPU_STATE, 0xffffffff);
5320         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5321         return 0;
5322 }
5323
5324 #endif /* TG3_TSO_SUPPORT != 0 */
5325
5326 /* tp->lock is held. */
5327 static void __tg3_set_mac_addr(struct tg3 *tp)
5328 {
5329         u32 addr_high, addr_low;
5330         int i;
5331
5332         addr_high = ((tp->dev->dev_addr[0] << 8) |
5333                      tp->dev->dev_addr[1]);
5334         addr_low = ((tp->dev->dev_addr[2] << 24) |
5335                     (tp->dev->dev_addr[3] << 16) |
5336                     (tp->dev->dev_addr[4] <<  8) |
5337                     (tp->dev->dev_addr[5] <<  0));
5338         for (i = 0; i < 4; i++) {
5339                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5340                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5341         }
5342
5343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5344             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5345                 for (i = 0; i < 12; i++) {
5346                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5347                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5348                 }
5349         }
5350
5351         addr_high = (tp->dev->dev_addr[0] +
5352                      tp->dev->dev_addr[1] +
5353                      tp->dev->dev_addr[2] +
5354                      tp->dev->dev_addr[3] +
5355                      tp->dev->dev_addr[4] +
5356                      tp->dev->dev_addr[5]) &
5357                 TX_BACKOFF_SEED_MASK;
5358         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5359 }
5360
5361 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5362 {
5363         struct tg3 *tp = netdev_priv(dev);
5364         struct sockaddr *addr = p;
5365
5366         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5367
5368         spin_lock_bh(&tp->lock);
5369         __tg3_set_mac_addr(tp);
5370         spin_unlock_bh(&tp->lock);
5371
5372         return 0;
5373 }
5374
5375 /* tp->lock is held. */
5376 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5377                            dma_addr_t mapping, u32 maxlen_flags,
5378                            u32 nic_addr)
5379 {
5380         tg3_write_mem(tp,
5381                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5382                       ((u64) mapping >> 32));
5383         tg3_write_mem(tp,
5384                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5385                       ((u64) mapping & 0xffffffff));
5386         tg3_write_mem(tp,
5387                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5388                        maxlen_flags);
5389
5390         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5391                 tg3_write_mem(tp,
5392                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5393                               nic_addr);
5394 }
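/* Example (it mirrors the send-ring setup later in tg3_reset_hw()): program
 * the send RCB with the TX descriptor ring's DMA address and length:
 *
 *      tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tp->tx_desc_mapping,
 *                     TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *                     NIC_SRAM_TX_BUFFER_DESC);
 */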
5395
5396 static void __tg3_set_rx_mode(struct net_device *);
5397 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5398 {
5399         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5400         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5401         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5402         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5403         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5404                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5405                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5406         }
5407         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5408         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5409         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5410                 u32 val = ec->stats_block_coalesce_usecs;
5411
5412                 if (!netif_carrier_ok(tp->dev))
5413                         val = 0;
5414
5415                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5416         }
5417 }
5418
5419 /* tp->lock is held. */
5420 static int tg3_reset_hw(struct tg3 *tp)
5421 {
5422         u32 val, rdmac_mode;
5423         int i, err, limit;
5424
5425         tg3_disable_ints(tp);
5426
5427         tg3_stop_fw(tp);
5428
5429         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5430
5431         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5432                 tg3_abort_hw(tp, 1);
5433         }
5434
5435         err = tg3_chip_reset(tp);
5436         if (err)
5437                 return err;
5438
5439         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5440
5441         /* This works around an issue with Athlon chipsets on
5442          * B3 tigon3 silicon.  This bit has no effect on any
5443          * other revision.  But do not set this on PCI Express
5444          * chips.
5445          */
5446         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5447                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5448         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5449
5450         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5451             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5452                 val = tr32(TG3PCI_PCISTATE);
5453                 val |= PCISTATE_RETRY_SAME_DMA;
5454                 tw32(TG3PCI_PCISTATE, val);
5455         }
5456
5457         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5458                 /* Enable some hw fixes.  */
5459                 val = tr32(TG3PCI_MSI_DATA);
5460                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5461                 tw32(TG3PCI_MSI_DATA, val);
5462         }
5463
5464         /* Descriptor ring init may make accesses to the
5465          * NIC SRAM area to setup the TX descriptors, so we
5466          * can only do this after the hardware has been
5467          * successfully reset.
5468          */
5469         tg3_init_rings(tp);
5470
5471         /* This value is determined during the probe time DMA
5472          * engine test, tg3_test_dma.
5473          */
5474         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5475
5476         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5477                           GRC_MODE_4X_NIC_SEND_RINGS |
5478                           GRC_MODE_NO_TX_PHDR_CSUM |
5479                           GRC_MODE_NO_RX_PHDR_CSUM);
5480         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5481         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5482                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5483         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5484                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5485
5486         tw32(GRC_MODE,
5487              tp->grc_mode |
5488              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5489
5490         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
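        /* Presumably 66 MHz / (65 + 1) = 1 MHz, i.e. a 1 usec timer tick,
         * matching the usec-based coalescing tick counts programmed in
         * __tg3_set_coalesce() below.
         */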
5491         val = tr32(GRC_MISC_CFG);
5492         val &= ~0xff;
5493         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5494         tw32(GRC_MISC_CFG, val);
5495
5496         /* Initialize MBUF/DESC pool. */
5497         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5498                 /* Do nothing.  */
5499         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5500                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5501                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5502                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5503                 else
5504                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5505                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5506                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5507         }
5508 #if TG3_TSO_SUPPORT != 0
5509         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5510                 int fw_len;
5511
5512                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5513                           TG3_TSO5_FW_RODATA_LEN +
5514                           TG3_TSO5_FW_DATA_LEN +
5515                           TG3_TSO5_FW_SBSS_LEN +
5516                           TG3_TSO5_FW_BSS_LEN);
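                /* Round fw_len up to the next 128-byte boundary; the 5705
                 * TSO firmware occupies the bottom of the mbuf pool, so the
                 * pool base and size below are adjusted to skip over it.
                 */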
5517                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5518                 tw32(BUFMGR_MB_POOL_ADDR,
5519                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5520                 tw32(BUFMGR_MB_POOL_SIZE,
5521                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5522         }
5523 #endif
5524
5525         if (tp->dev->mtu <= ETH_DATA_LEN) {
5526                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5527                      tp->bufmgr_config.mbuf_read_dma_low_water);
5528                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5529                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5530                 tw32(BUFMGR_MB_HIGH_WATER,
5531                      tp->bufmgr_config.mbuf_high_water);
5532         } else {
5533                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5534                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5535                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5536                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5537                 tw32(BUFMGR_MB_HIGH_WATER,
5538                      tp->bufmgr_config.mbuf_high_water_jumbo);
5539         }
5540         tw32(BUFMGR_DMA_LOW_WATER,
5541              tp->bufmgr_config.dma_low_water);
5542         tw32(BUFMGR_DMA_HIGH_WATER,
5543              tp->bufmgr_config.dma_high_water);
5544
5545         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5546         for (i = 0; i < 2000; i++) {
5547                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5548                         break;
5549                 udelay(10);
5550         }
5551         if (i >= 2000) {
5552                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5553                        tp->dev->name);
5554                 return -ENODEV;
5555         }
5556
5557         /* Set up replenish threshold. */
5558         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5559
5560         /* Initialize TG3_BDINFOs at:
5561          *  RCVDBDI_STD_BD:     standard eth size rx ring
5562          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5563          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5564          *
5565          * like so:
5566          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5567          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5568          *                              ring attribute flags
5569          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5570          *
5571          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5572          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5573          *
5574          * The size of each ring is fixed in the firmware, but the location is
5575          * configurable.
5576          */
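        /* For example, on pre-5705 chips the standard ring programmed just
         * below gets MAXLEN_FLAGS = RX_STD_MAX_SIZE <<
         * BDINFO_FLAGS_MAXLEN_SHIFT: the maximum rx buffer size in the upper
         * half-word, with no attribute flags set.
         */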
5577         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5578              ((u64) tp->rx_std_mapping >> 32));
5579         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5580              ((u64) tp->rx_std_mapping & 0xffffffff));
5581         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5582              NIC_SRAM_RX_BUFFER_DESC);
5583
5584         /* Don't even try to program the JUMBO/MINI buffer descriptor
5585          * configs on 5705.
5586          */
5587         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5588                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5589                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5590         } else {
5591                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5592                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5593
5594                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5595                      BDINFO_FLAGS_DISABLED);
5596
5597                 /* Set up replenish threshold. */
5598                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5599
5600                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5601                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5602                              ((u64) tp->rx_jumbo_mapping >> 32));
5603                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5604                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5605                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5606                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5607                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5608                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5609                 } else {
5610                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5611                              BDINFO_FLAGS_DISABLED);
5612                 }
5613
5614         }
5615
5616         /* There is only one send ring on 5705/5750, no need to explicitly
5617          * disable the others.
5618          */
5619         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5620                 /* Clear out send RCB ring in SRAM. */
5621                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5622                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5623                                       BDINFO_FLAGS_DISABLED);
5624         }
5625
5626         tp->tx_prod = 0;
5627         tp->tx_cons = 0;
5628         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5629         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5630
5631         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5632                        tp->tx_desc_mapping,
5633                        (TG3_TX_RING_SIZE <<
5634                         BDINFO_FLAGS_MAXLEN_SHIFT),
5635                        NIC_SRAM_TX_BUFFER_DESC);
5636
5637         /* There is only one receive return ring on 5705/5750, no need
5638          * to explicitly disable the others.
5639          */
5640         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5641                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5642                      i += TG3_BDINFO_SIZE) {
5643                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5644                                       BDINFO_FLAGS_DISABLED);
5645                 }
5646         }
5647
5648         tp->rx_rcb_ptr = 0;
5649         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5650
5651         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5652                        tp->rx_rcb_mapping,
5653                        (TG3_RX_RCB_RING_SIZE(tp) <<
5654                         BDINFO_FLAGS_MAXLEN_SHIFT),
5655                        0);
5656
5657         tp->rx_std_ptr = tp->rx_pending;
5658         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5659                      tp->rx_std_ptr);
5660
5661         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5662                                                 tp->rx_jumbo_pending : 0;
5663         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5664                      tp->rx_jumbo_ptr);
5665
5666         /* Initialize MAC address and backoff seed. */
5667         __tg3_set_mac_addr(tp);
5668
5669         /* MTU + ethernet header + FCS + optional VLAN tag */
5670         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
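        /* ETH_HLEN covers the 14-byte Ethernet header; the extra 8 bytes
         * leave room for the 4-byte FCS plus a 4-byte 802.1Q VLAN tag.
         */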
5671
5672         /* The slot time is changed by tg3_setup_phy if we
5673          * run at gigabit with half duplex.
5674          */
5675         tw32(MAC_TX_LENGTHS,
5676              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5677              (6 << TX_LENGTHS_IPG_SHIFT) |
5678              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5679
5680         /* Receive rules. */
5681         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5682         tw32(RCVLPC_CONFIG, 0x0181);
5683
5684         /* Calculate the RDMAC_MODE setting early; we need it to determine
5685          * the RCVLPC_STATS_ENABLE mask.
5686          */
5687         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5688                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5689                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5690                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5691                       RDMAC_MODE_LNGREAD_ENAB);
5692         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5693                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5694
5695         /* If statement applies to 5705 and 5750 PCI devices only */
5696         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5697              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5698             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5699                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5700                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5701                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5702                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5703                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5704                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5705                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5706                 }
5707         }
5708
5709         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5710                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5711
5712 #if TG3_TSO_SUPPORT != 0
5713         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5714                 rdmac_mode |= (1 << 27);
5715 #endif
5716
5717         /* Receive/send statistics. */
5718         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5719             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5720                 val = tr32(RCVLPC_STATS_ENABLE);
5721                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5722                 tw32(RCVLPC_STATS_ENABLE, val);
5723         } else {
5724                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5725         }
5726         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5727         tw32(SNDDATAI_STATSENAB, 0xffffff);
5728         tw32(SNDDATAI_STATSCTRL,
5729              (SNDDATAI_SCTRL_ENABLE |
5730               SNDDATAI_SCTRL_FASTUPD));
5731
5732         /* Set up host coalescing engine. */
5733         tw32(HOSTCC_MODE, 0);
5734         for (i = 0; i < 2000; i++) {
5735                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5736                         break;
5737                 udelay(10);
5738         }
5739
5740         __tg3_set_coalesce(tp, &tp->coal);
5741
5742         /* set status block DMA address */
5743         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5744              ((u64) tp->status_mapping >> 32));
5745         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5746              ((u64) tp->status_mapping & 0xffffffff));
5747
5748         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5749                 /* Status/statistics block address.  See tg3_timer,
5750                  * the tg3_periodic_fetch_stats call there, and
5751                  * tg3_get_stats to see how this works for 5705/5750 chips.
5752                  */
5753                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5754                      ((u64) tp->stats_mapping >> 32));
5755                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5756                      ((u64) tp->stats_mapping & 0xffffffff));
5757                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5758                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5759         }
5760
5761         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5762
5763         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5764         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5765         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5766                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5767
5768         /* Clear statistics/status block in chip, and status block in ram. */
5769         for (i = NIC_SRAM_STATS_BLK;
5770              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5771              i += sizeof(u32)) {
5772                 tg3_write_mem(tp, i, 0);
5773                 udelay(40);
5774         }
5775         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5776
5777         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5778                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5779         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5780         udelay(40);
5781
5782         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5783          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5784          * register to preserve the GPIO settings for LOMs. The GPIOs,
5785          * whether used as inputs or outputs, are set by boot code after
5786          * reset.
5787          */
5788         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5789                 u32 gpio_mask;
5790
5791                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5792                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5793
5794                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5795                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5796                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5797
5798                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5799
5800                 /* GPIO1 must be driven high for eeprom write protect */
5801                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5802                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5803         }
5804         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5805         udelay(100);
5806
5807         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5808         tp->last_tag = 0;
5809
5810         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5811                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5812                 udelay(40);
5813         }
5814
5815         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5816                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5817                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5818                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5819                WDMAC_MODE_LNGREAD_ENAB);
5820
5821         /* If statement applies to 5705 and 5750 PCI devices only */
5822         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5823              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5825                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5826                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5827                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5828                         /* nothing */
5829                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5830                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5831                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5832                         val |= WDMAC_MODE_RX_ACCEL;
5833                 }
5834         }
5835
5836         tw32_f(WDMAC_MODE, val);
5837         udelay(40);
5838
5839         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5840                 val = tr32(TG3PCI_X_CAPS);
5841                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5842                         val &= ~PCIX_CAPS_BURST_MASK;
5843                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5844                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5845                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5846                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5847                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5848                                 val |= (tp->split_mode_max_reqs <<
5849                                         PCIX_CAPS_SPLIT_SHIFT);
5850                 }
5851                 tw32(TG3PCI_X_CAPS, val);
5852         }
5853
5854         tw32_f(RDMAC_MODE, rdmac_mode);
5855         udelay(40);
5856
5857         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5858         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5859                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5860         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5861         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5862         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5863         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5864         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5865 #if TG3_TSO_SUPPORT != 0
5866         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5867                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5868 #endif
5869         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5870         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5871
5872         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5873                 err = tg3_load_5701_a0_firmware_fix(tp);
5874                 if (err)
5875                         return err;
5876         }
5877
5878 #if TG3_TSO_SUPPORT != 0
5879         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5880                 err = tg3_load_tso_firmware(tp);
5881                 if (err)
5882                         return err;
5883         }
5884 #endif
5885
5886         tp->tx_mode = TX_MODE_ENABLE;
5887         tw32_f(MAC_TX_MODE, tp->tx_mode);
5888         udelay(100);
5889
5890         tp->rx_mode = RX_MODE_ENABLE;
5891         tw32_f(MAC_RX_MODE, tp->rx_mode);
5892         udelay(10);
5893
5894         if (tp->link_config.phy_is_low_power) {
5895                 tp->link_config.phy_is_low_power = 0;
5896                 tp->link_config.speed = tp->link_config.orig_speed;
5897                 tp->link_config.duplex = tp->link_config.orig_duplex;
5898                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5899         }
5900
5901         tp->mi_mode = MAC_MI_MODE_BASE;
5902         tw32_f(MAC_MI_MODE, tp->mi_mode);
5903         udelay(80);
5904
5905         tw32(MAC_LED_CTRL, tp->led_ctrl);
5906
5907         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5908         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5909                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5910                 udelay(10);
5911         }
5912         tw32_f(MAC_RX_MODE, tp->rx_mode);
5913         udelay(10);
5914
5915         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5916                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5917                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5918                         /* Set drive transmission level to 1.2V, but only
5919                          * if the signal pre-emphasis bit is not set. */
5920                         val = tr32(MAC_SERDES_CFG);
5921                         val &= 0xfffff000;
5922                         val |= 0x880;
5923                         tw32(MAC_SERDES_CFG, val);
5924                 }
5925                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5926                         tw32(MAC_SERDES_CFG, 0x616000);
5927         }
5928
5929         /* Prevent chip from dropping frames when flow control
5930          * is enabled.
5931          */
5932         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5933
5934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5935             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5936                 /* Use hardware link auto-negotiation */
5937                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5938         }
5939
5940         err = tg3_setup_phy(tp, 1);
5941         if (err)
5942                 return err;
5943
5944         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5945                 u32 tmp;
5946
5947                 /* Clear CRC stats. */
5948                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5949                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5950                         tg3_readphy(tp, 0x14, &tmp);
5951                 }
5952         }
5953
5954         __tg3_set_rx_mode(tp->dev);
5955
5956         /* Initialize receive rules. */
5957         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5958         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5959         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5960         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5961
5962         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5963             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
5964                 limit = 8;
5965         else
5966                 limit = 16;
5967         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5968                 limit -= 4;
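        /* The switch below falls through from "case limit" downward, clearing
         * every rule/value pair from (limit - 1) down to rule 4; rules 0 and 1
         * were programmed above, and rules 2 and 3 are deliberately left
         * alone (their writes are commented out).
         */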
5969         switch (limit) {
5970         case 16:
5971                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5972         case 15:
5973                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5974         case 14:
5975                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5976         case 13:
5977                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5978         case 12:
5979                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5980         case 11:
5981                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5982         case 10:
5983                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5984         case 9:
5985                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5986         case 8:
5987                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5988         case 7:
5989                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5990         case 6:
5991                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5992         case 5:
5993                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5994         case 4:
5995                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5996         case 3:
5997                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5998         case 2:
5999         case 1:
6000
6001         default:
6002                 break;
6003         }
6004
6005         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6006
6007         return 0;
6008 }
6009
6010 /* Called at device open time to get the chip ready for
6011  * packet processing.  Invoked with tp->lock held.
6012  */
6013 static int tg3_init_hw(struct tg3 *tp)
6014 {
6015         int err;
6016
6017         /* Force the chip into D0. */
6018         err = tg3_set_power_state(tp, 0);
6019         if (err)
6020                 goto out;
6021
6022         tg3_switch_clocks(tp);
6023
6024         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6025
6026         err = tg3_reset_hw(tp);
6027
6028 out:
6029         return err;
6030 }
6031
6032 #define TG3_STAT_ADD32(PSTAT, REG) \
6033 do {    u32 __val = tr32(REG); \
6034         (PSTAT)->low += __val; \
6035         if ((PSTAT)->low < __val) \
6036                 (PSTAT)->high += 1; \
6037 } while (0)
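/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit {high,low}
 * software counter: if adding the register value wrapped the low word
 * (low < __val after the add), a carry is propagated into the high word.
 */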
6038
6039 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6040 {
6041         struct tg3_hw_stats *sp = tp->hw_stats;
6042
6043         if (!netif_carrier_ok(tp->dev))
6044                 return;
6045
6046         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6047         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6048         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6049         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6050         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6051         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6052         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6053         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6054         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6055         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6056         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6057         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6058         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6059
6060         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6061         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6062         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6063         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6064         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6065         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6066         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6067         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6068         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6069         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6070         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6071         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6072         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6073         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6074 }
6075
6076 static void tg3_timer(unsigned long __opaque)
6077 {
6078         struct tg3 *tp = (struct tg3 *) __opaque;
6079
6080         spin_lock(&tp->lock);
6081
6082         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6083                 /* All of this garbage is because, when using non-tagged
6084                  * IRQ status, the mailbox/status_block protocol the chip
6085                  * uses with the CPU is race prone.
6086                  */
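                /* If the status block already indicates an update, poke the
                 * chip to (re)raise the interrupt via GRC_LCLCTRL_SETINT;
                 * otherwise kick the coalescing engine with HOSTCC_MODE_NOW
                 * so a fresh status block is DMA'd to the host.
                 */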
6087                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6088                         tw32(GRC_LOCAL_CTRL,
6089                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6090                 } else {
6091                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6092                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6093                 }
6094
6095                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6096                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6097                         spin_unlock(&tp->lock);
6098                         schedule_work(&tp->reset_task);
6099                         return;
6100                 }
6101         }
6102
6103         /* This part only runs once per second. */
6104         if (!--tp->timer_counter) {
6105                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6106                         tg3_periodic_fetch_stats(tp);
6107
6108                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6109                         u32 mac_stat;
6110                         int phy_event;
6111
6112                         mac_stat = tr32(MAC_STATUS);
6113
6114                         phy_event = 0;
6115                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6116                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6117                                         phy_event = 1;
6118                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6119                                 phy_event = 1;
6120
6121                         if (phy_event)
6122                                 tg3_setup_phy(tp, 0);
6123                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6124                         u32 mac_stat = tr32(MAC_STATUS);
6125                         int need_setup = 0;
6126
6127                         if (netif_carrier_ok(tp->dev) &&
6128                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6129                                 need_setup = 1;
6130                         }
6131                         if (!netif_carrier_ok(tp->dev) &&
6132                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6133                                          MAC_STATUS_SIGNAL_DET))) {
6134                                 need_setup = 1;
6135                         }
6136                         if (need_setup) {
6137                                 tw32_f(MAC_MODE,
6138                                      (tp->mac_mode &
6139                                       ~MAC_MODE_PORT_MODE_MASK));
6140                                 udelay(40);
6141                                 tw32_f(MAC_MODE, tp->mac_mode);
6142                                 udelay(40);
6143                                 tg3_setup_phy(tp, 0);
6144                         }
6145                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6146                         tg3_serdes_parallel_detect(tp);
6147
6148                 tp->timer_counter = tp->timer_multiplier;
6149         }
6150
6151         /* Heartbeat is only sent once every 120 seconds.  */
6152         if (!--tp->asf_counter) {
6153                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6154                         u32 val;
6155
6156                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6157                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6158                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6159                         val = tr32(GRC_RX_CPU_EVENT);
6160                         val |= (1 << 14);
6161                         tw32(GRC_RX_CPU_EVENT, val);
6162                 }
6163                 tp->asf_counter = tp->asf_multiplier;
6164         }
6165
6166         spin_unlock(&tp->lock);
6167
6168         tp->timer.expires = jiffies + tp->timer_offset;
6169         add_timer(&tp->timer);
6170 }
6171
6172 static int tg3_test_interrupt(struct tg3 *tp)
6173 {
6174         struct net_device *dev = tp->dev;
6175         int err, i;
6176         u32 int_mbox = 0;
6177
6178         if (!netif_running(dev))
6179                 return -ENODEV;
6180
6181         tg3_disable_ints(tp);
6182
6183         free_irq(tp->pdev->irq, dev);
6184
6185         err = request_irq(tp->pdev->irq, tg3_test_isr,
6186                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6187         if (err)
6188                 return err;
6189
6190         tg3_enable_ints(tp);
6191
6192         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6193                HOSTCC_MODE_NOW);
6194
6195         for (i = 0; i < 5; i++) {
6196                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6197                                         TG3_64BIT_REG_LOW);
6198                 if (int_mbox != 0)
6199                         break;
6200                 msleep(10);
6201         }
6202
6203         tg3_disable_ints(tp);
6204
6205         free_irq(tp->pdev->irq, dev);
6206
6207         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6208                 err = request_irq(tp->pdev->irq, tg3_msi,
6209                                   SA_SAMPLE_RANDOM, dev->name, dev);
6210         else {
6211                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6212                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6213                         fn = tg3_interrupt_tagged;
6214                 err = request_irq(tp->pdev->irq, fn,
6215                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6216         }
6217
6218         if (err)
6219                 return err;
6220
6221         if (int_mbox != 0)
6222                 return 0;
6223
6224         return -EIO;
6225 }
6226
6227 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6228  * mode is successfully restored.
6229  */
6230 static int tg3_test_msi(struct tg3 *tp)
6231 {
6232         struct net_device *dev = tp->dev;
6233         int err;
6234         u16 pci_cmd;
6235
6236         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6237                 return 0;
6238
6239         /* Turn off SERR reporting in case MSI terminates with Master
6240          * Abort.
6241          */
6242         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6243         pci_write_config_word(tp->pdev, PCI_COMMAND,
6244                               pci_cmd & ~PCI_COMMAND_SERR);
6245
6246         err = tg3_test_interrupt(tp);
6247
6248         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6249
6250         if (!err)
6251                 return 0;
6252
6253         /* other failures */
6254         if (err != -EIO)
6255                 return err;
6256
6257         /* MSI test failed, go back to INTx mode */
6258         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6259                "switching to INTx mode. Please report this failure to "
6260                "the PCI maintainer and include system chipset information.\n",
6261                        tp->dev->name);
6262
6263         free_irq(tp->pdev->irq, dev);
6264         pci_disable_msi(tp->pdev);
6265
6266         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6267
6268         {
6269                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6270                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6271                         fn = tg3_interrupt_tagged;
6272
6273                 err = request_irq(tp->pdev->irq, fn,
6274                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6275         }
6276         if (err)
6277                 return err;
6278
6279         /* Need to reset the chip because the MSI cycle may have terminated
6280          * with Master Abort.
6281          */
6282         tg3_full_lock(tp, 1);
6283
6284         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6285         err = tg3_init_hw(tp);
6286
6287         tg3_full_unlock(tp);
6288
6289         if (err)
6290                 free_irq(tp->pdev->irq, dev);
6291
6292         return err;
6293 }
6294
6295 static int tg3_open(struct net_device *dev)
6296 {
6297         struct tg3 *tp = netdev_priv(dev);
6298         int err;
6299
6300         tg3_full_lock(tp, 0);
6301
6302         tg3_disable_ints(tp);
6303         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6304
6305         tg3_full_unlock(tp);
6306
6307         /* The placement of this call is tied
6308          * to the setup and use of Host TX descriptors.
6309          */
6310         err = tg3_alloc_consistent(tp);
6311         if (err)
6312                 return err;
6313
6314         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6315             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6316             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6317                 /* All MSI supporting chips should support tagged
6318                  * status.  Assert that this is the case.
6319                  */
6320                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6321                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6322                                "Not using MSI.\n", tp->dev->name);
6323                 } else if (pci_enable_msi(tp->pdev) == 0) {
6324                         u32 msi_mode;
6325
6326                         msi_mode = tr32(MSGINT_MODE);
6327                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6328                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6329                 }
6330         }
6331         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6332                 err = request_irq(tp->pdev->irq, tg3_msi,
6333                                   SA_SAMPLE_RANDOM, dev->name, dev);
6334         else {
6335                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6336                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6337                         fn = tg3_interrupt_tagged;
6338
6339                 err = request_irq(tp->pdev->irq, fn,
6340                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6341         }
6342
6343         if (err) {
6344                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6345                         pci_disable_msi(tp->pdev);
6346                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6347                 }
6348                 tg3_free_consistent(tp);
6349                 return err;
6350         }
6351
6352         tg3_full_lock(tp, 0);
6353
6354         err = tg3_init_hw(tp);
6355         if (err) {
6356                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6357                 tg3_free_rings(tp);
6358         } else {
6359                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6360                         tp->timer_offset = HZ;
6361                 else
6362                         tp->timer_offset = HZ / 10;
6363
6364                 BUG_ON(tp->timer_offset > HZ);
6365                 tp->timer_counter = tp->timer_multiplier =
6366                         (HZ / tp->timer_offset);
6367                 tp->asf_counter = tp->asf_multiplier =
6368                         ((HZ / tp->timer_offset) * 120);
6369
6370                 init_timer(&tp->timer);
6371                 tp->timer.expires = jiffies + tp->timer_offset;
6372                 tp->timer.data = (unsigned long) tp;
6373                 tp->timer.function = tg3_timer;
6374         }
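        /*
         * With tagged status the timer fires once a second
         * (timer_offset == HZ, so timer_counter == 1); otherwise it
         * fires ten times a second (timer_offset == HZ / 10,
         * timer_counter == 10).  Either way asf_counter works out to
         * about 120 seconds' worth of timer ticks.
         */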
6375
6376         tg3_full_unlock(tp);
6377
6378         if (err) {
6379                 free_irq(tp->pdev->irq, dev);
6380                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6381                         pci_disable_msi(tp->pdev);
6382                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6383                 }
6384                 tg3_free_consistent(tp);
6385                 return err;
6386         }
6387
6388         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6389                 err = tg3_test_msi(tp);
6390
6391                 if (err) {
6392                         tg3_full_lock(tp, 0);
6393
6394                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6395                                 pci_disable_msi(tp->pdev);
6396                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6397                         }
6398                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6399                         tg3_free_rings(tp);
6400                         tg3_free_consistent(tp);
6401
6402                         tg3_full_unlock(tp);
6403
6404                         return err;
6405                 }
6406         }
6407
6408         tg3_full_lock(tp, 0);
6409
6410         add_timer(&tp->timer);
6411         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6412         tg3_enable_ints(tp);
6413
6414         tg3_full_unlock(tp);
6415
6416         netif_start_queue(dev);
6417
6418         return 0;
6419 }
6420
6421 #if 0
6422 /*static*/ void tg3_dump_state(struct tg3 *tp)
6423 {
6424         u32 val32, val32_2, val32_3, val32_4, val32_5;
6425         u16 val16;
6426         int i;
6427
6428         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6429         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6430         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6431                val16, val32);
6432
6433         /* MAC block */
6434         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6435                tr32(MAC_MODE), tr32(MAC_STATUS));
6436         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6437                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6438         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6439                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6440         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6441                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6442
6443         /* Send data initiator control block */
6444         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6445                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6446         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6447                tr32(SNDDATAI_STATSCTRL));
6448
6449         /* Send data completion control block */
6450         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6451
6452         /* Send BD ring selector block */
6453         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6454                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6455
6456         /* Send BD initiator control block */
6457         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6458                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6459
6460         /* Send BD completion control block */
6461         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6462
6463         /* Receive list placement control block */
6464         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6465                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6466         printk("       RCVLPC_STATSCTRL[%08x]\n",
6467                tr32(RCVLPC_STATSCTRL));
6468
6469         /* Receive data and receive BD initiator control block */
6470         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6471                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6472
6473         /* Receive data completion control block */
6474         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6475                tr32(RCVDCC_MODE));
6476
6477         /* Receive BD initiator control block */
6478         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6479                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6480
6481         /* Receive BD completion control block */
6482         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6483                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6484
6485         /* Receive list selector control block */
6486         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6487                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6488
6489         /* Mbuf cluster free block */
6490         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6491                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6492
6493         /* Host coalescing control block */
6494         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6495                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6496         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6497                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6498                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6499         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6500                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6501                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6502         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6503                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6504         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6505                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6506
6507         /* Memory arbiter control block */
6508         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6509                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6510
6511         /* Buffer manager control block */
6512         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6513                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6514         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6515                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6516         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6517                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6518                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6519                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6520
6521         /* Read DMA control block */
6522         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6523                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6524
6525         /* Write DMA control block */
6526         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6527                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6528
6529         /* DMA completion block */
6530         printk("DEBUG: DMAC_MODE[%08x]\n",
6531                tr32(DMAC_MODE));
6532
6533         /* GRC block */
6534         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6535                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6536         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6537                tr32(GRC_LOCAL_CTRL));
6538
6539         /* TG3_BDINFOs */
6540         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6541                tr32(RCVDBDI_JUMBO_BD + 0x0),
6542                tr32(RCVDBDI_JUMBO_BD + 0x4),
6543                tr32(RCVDBDI_JUMBO_BD + 0x8),
6544                tr32(RCVDBDI_JUMBO_BD + 0xc));
6545         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6546                tr32(RCVDBDI_STD_BD + 0x0),
6547                tr32(RCVDBDI_STD_BD + 0x4),
6548                tr32(RCVDBDI_STD_BD + 0x8),
6549                tr32(RCVDBDI_STD_BD + 0xc));
6550         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6551                tr32(RCVDBDI_MINI_BD + 0x0),
6552                tr32(RCVDBDI_MINI_BD + 0x4),
6553                tr32(RCVDBDI_MINI_BD + 0x8),
6554                tr32(RCVDBDI_MINI_BD + 0xc));
6555
6556         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6557         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6558         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6559         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6560         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6561                val32, val32_2, val32_3, val32_4);
6562
6563         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6564         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6565         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6566         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6567         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6568                val32, val32_2, val32_3, val32_4);
6569
6570         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6571         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6572         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6573         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6574         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6575         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6576                val32, val32_2, val32_3, val32_4, val32_5);
6577
6578         /* SW status block */
6579         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6580                tp->hw_status->status,
6581                tp->hw_status->status_tag,
6582                tp->hw_status->rx_jumbo_consumer,
6583                tp->hw_status->rx_consumer,
6584                tp->hw_status->rx_mini_consumer,
6585                tp->hw_status->idx[0].rx_producer,
6586                tp->hw_status->idx[0].tx_consumer);
6587
6588         /* SW statistics block */
6589         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6590                ((u32 *)tp->hw_stats)[0],
6591                ((u32 *)tp->hw_stats)[1],
6592                ((u32 *)tp->hw_stats)[2],
6593                ((u32 *)tp->hw_stats)[3]);
6594
6595         /* Mailboxes */
6596         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6597                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6598                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6599                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6600                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6601
6602         /* NIC side send descriptors. */
6603         for (i = 0; i < 6; i++) {
6604                 unsigned long txd;
6605
6606                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6607                         + (i * sizeof(struct tg3_tx_buffer_desc));
6608                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6609                        i,
6610                        readl(txd + 0x0), readl(txd + 0x4),
6611                        readl(txd + 0x8), readl(txd + 0xc));
6612         }
6613
6614         /* NIC side RX descriptors. */
6615         for (i = 0; i < 6; i++) {
6616                 unsigned long rxd;
6617
6618                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6619                         + (i * sizeof(struct tg3_rx_buffer_desc));
6620                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6621                        i,
6622                        readl(rxd + 0x0), readl(rxd + 0x4),
6623                        readl(rxd + 0x8), readl(rxd + 0xc));
6624                 rxd += (4 * sizeof(u32));
6625                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6626                        i,
6627                        readl(rxd + 0x0), readl(rxd + 0x4),
6628                        readl(rxd + 0x8), readl(rxd + 0xc));
6629         }
6630
6631         for (i = 0; i < 6; i++) {
6632                 unsigned long rxd;
6633
6634                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6635                         + (i * sizeof(struct tg3_rx_buffer_desc));
6636                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6637                        i,
6638                        readl(rxd + 0x0), readl(rxd + 0x4),
6639                        readl(rxd + 0x8), readl(rxd + 0xc));
6640                 rxd += (4 * sizeof(u32));
6641                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6642                        i,
6643                        readl(rxd + 0x0), readl(rxd + 0x4),
6644                        readl(rxd + 0x8), readl(rxd + 0xc));
6645         }
6646 }
6647 #endif
6648
6649 static struct net_device_stats *tg3_get_stats(struct net_device *);
6650 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6651
6652 static int tg3_close(struct net_device *dev)
6653 {
6654         struct tg3 *tp = netdev_priv(dev);
6655
6656         netif_stop_queue(dev);
6657
6658         del_timer_sync(&tp->timer);
6659
6660         tg3_full_lock(tp, 1);
6661 #if 0
6662         tg3_dump_state(tp);
6663 #endif
6664
6665         tg3_disable_ints(tp);
6666
6667         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6668         tg3_free_rings(tp);
6669         tp->tg3_flags &=
6670                 ~(TG3_FLAG_INIT_COMPLETE |
6671                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6672         netif_carrier_off(tp->dev);
6673
6674         tg3_full_unlock(tp);
6675
6676         free_irq(tp->pdev->irq, dev);
6677         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6678                 pci_disable_msi(tp->pdev);
6679                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6680         }
6681
6682         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6683                sizeof(tp->net_stats_prev));
6684         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6685                sizeof(tp->estats_prev));
6686
6687         tg3_free_consistent(tp);
6688
6689         return 0;
6690 }
6691
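/*
 * The hardware keeps each statistic as a {high, low} pair.  On a
 * 32-bit kernel only the low word fits in the unsigned long return
 * value, so a busy counter can wrap; 64-bit builds return the full
 * 64-bit value.
 */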
6692 static inline unsigned long get_stat64(tg3_stat64_t *val)
6693 {
6694         unsigned long ret;
6695
6696 #if (BITS_PER_LONG == 32)
6697         ret = val->low;
6698 #else
6699         ret = ((u64)val->high << 32) | ((u64)val->low);
6700 #endif
6701         return ret;
6702 }
6703
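/*
 * On 5700/5701 copper devices the FCS error count comes from the PHY:
 * writing register 0x1e with bit 15 set appears to expose an error
 * counter in register 0x14, which is accumulated into
 * tp->phy_crc_errors.  All other chips use rx_fcs_errors from the MAC
 * statistics block.
 */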
6704 static unsigned long calc_crc_errors(struct tg3 *tp)
6705 {
6706         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6707
6708         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6709             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6710              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6711                 u32 val;
6712
6713                 spin_lock_bh(&tp->lock);
6714                 if (!tg3_readphy(tp, 0x1e, &val)) {
6715                         tg3_writephy(tp, 0x1e, val | 0x8000);
6716                         tg3_readphy(tp, 0x14, &val);
6717                 } else
6718                         val = 0;
6719                 spin_unlock_bh(&tp->lock);
6720
6721                 tp->phy_crc_errors += val;
6722
6723                 return tp->phy_crc_errors;
6724         }
6725
6726         return get_stat64(&hw_stats->rx_fcs_errors);
6727 }
6728
6729 #define ESTAT_ADD(member) \
6730         estats->member =        old_estats->member + \
6731                                 get_stat64(&hw_stats->member)
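/*
 * For a single counter, e.g. ESTAT_ADD(rx_octets), this expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * i.e. the snapshot saved at the last tg3_close() is added back in so
 * the ethtool counters remain cumulative across down/up cycles.
 */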
6732
6733 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6734 {
6735         struct tg3_ethtool_stats *estats = &tp->estats;
6736         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6737         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6738
6739         if (!hw_stats)
6740                 return old_estats;
6741
6742         ESTAT_ADD(rx_octets);
6743         ESTAT_ADD(rx_fragments);
6744         ESTAT_ADD(rx_ucast_packets);
6745         ESTAT_ADD(rx_mcast_packets);
6746         ESTAT_ADD(rx_bcast_packets);
6747         ESTAT_ADD(rx_fcs_errors);
6748         ESTAT_ADD(rx_align_errors);
6749         ESTAT_ADD(rx_xon_pause_rcvd);
6750         ESTAT_ADD(rx_xoff_pause_rcvd);
6751         ESTAT_ADD(rx_mac_ctrl_rcvd);
6752         ESTAT_ADD(rx_xoff_entered);
6753         ESTAT_ADD(rx_frame_too_long_errors);
6754         ESTAT_ADD(rx_jabbers);
6755         ESTAT_ADD(rx_undersize_packets);
6756         ESTAT_ADD(rx_in_length_errors);
6757         ESTAT_ADD(rx_out_length_errors);
6758         ESTAT_ADD(rx_64_or_less_octet_packets);
6759         ESTAT_ADD(rx_65_to_127_octet_packets);
6760         ESTAT_ADD(rx_128_to_255_octet_packets);
6761         ESTAT_ADD(rx_256_to_511_octet_packets);
6762         ESTAT_ADD(rx_512_to_1023_octet_packets);
6763         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6764         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6765         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6766         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6767         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6768
6769         ESTAT_ADD(tx_octets);
6770         ESTAT_ADD(tx_collisions);
6771         ESTAT_ADD(tx_xon_sent);
6772         ESTAT_ADD(tx_xoff_sent);
6773         ESTAT_ADD(tx_flow_control);
6774         ESTAT_ADD(tx_mac_errors);
6775         ESTAT_ADD(tx_single_collisions);
6776         ESTAT_ADD(tx_mult_collisions);
6777         ESTAT_ADD(tx_deferred);
6778         ESTAT_ADD(tx_excessive_collisions);
6779         ESTAT_ADD(tx_late_collisions);
6780         ESTAT_ADD(tx_collide_2times);
6781         ESTAT_ADD(tx_collide_3times);
6782         ESTAT_ADD(tx_collide_4times);
6783         ESTAT_ADD(tx_collide_5times);
6784         ESTAT_ADD(tx_collide_6times);
6785         ESTAT_ADD(tx_collide_7times);
6786         ESTAT_ADD(tx_collide_8times);
6787         ESTAT_ADD(tx_collide_9times);
6788         ESTAT_ADD(tx_collide_10times);
6789         ESTAT_ADD(tx_collide_11times);
6790         ESTAT_ADD(tx_collide_12times);
6791         ESTAT_ADD(tx_collide_13times);
6792         ESTAT_ADD(tx_collide_14times);
6793         ESTAT_ADD(tx_collide_15times);
6794         ESTAT_ADD(tx_ucast_packets);
6795         ESTAT_ADD(tx_mcast_packets);
6796         ESTAT_ADD(tx_bcast_packets);
6797         ESTAT_ADD(tx_carrier_sense_errors);
6798         ESTAT_ADD(tx_discards);
6799         ESTAT_ADD(tx_errors);
6800
6801         ESTAT_ADD(dma_writeq_full);
6802         ESTAT_ADD(dma_write_prioq_full);
6803         ESTAT_ADD(rxbds_empty);
6804         ESTAT_ADD(rx_discards);
6805         ESTAT_ADD(rx_errors);
6806         ESTAT_ADD(rx_threshold_hit);
6807
6808         ESTAT_ADD(dma_readq_full);
6809         ESTAT_ADD(dma_read_prioq_full);
6810         ESTAT_ADD(tx_comp_queue_full);
6811
6812         ESTAT_ADD(ring_set_send_prod_index);
6813         ESTAT_ADD(ring_status_update);
6814         ESTAT_ADD(nic_irqs);
6815         ESTAT_ADD(nic_avoided_irqs);
6816         ESTAT_ADD(nic_tx_threshold_hit);
6817
6818         return estats;
6819 }
6820
6821 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6822 {
6823         struct tg3 *tp = netdev_priv(dev);
6824         struct net_device_stats *stats = &tp->net_stats;
6825         struct net_device_stats *old_stats = &tp->net_stats_prev;
6826         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6827
6828         if (!hw_stats)
6829                 return old_stats;
6830
6831         stats->rx_packets = old_stats->rx_packets +
6832                 get_stat64(&hw_stats->rx_ucast_packets) +
6833                 get_stat64(&hw_stats->rx_mcast_packets) +
6834                 get_stat64(&hw_stats->rx_bcast_packets);
6835                 
6836         stats->tx_packets = old_stats->tx_packets +
6837                 get_stat64(&hw_stats->tx_ucast_packets) +
6838                 get_stat64(&hw_stats->tx_mcast_packets) +
6839                 get_stat64(&hw_stats->tx_bcast_packets);
6840
6841         stats->rx_bytes = old_stats->rx_bytes +
6842                 get_stat64(&hw_stats->rx_octets);
6843         stats->tx_bytes = old_stats->tx_bytes +
6844                 get_stat64(&hw_stats->tx_octets);
6845
6846         stats->rx_errors = old_stats->rx_errors +
6847                 get_stat64(&hw_stats->rx_errors) +
6848                 get_stat64(&hw_stats->rx_discards);
6849         stats->tx_errors = old_stats->tx_errors +
6850                 get_stat64(&hw_stats->tx_errors) +
6851                 get_stat64(&hw_stats->tx_mac_errors) +
6852                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6853                 get_stat64(&hw_stats->tx_discards);
6854
6855         stats->multicast = old_stats->multicast +
6856                 get_stat64(&hw_stats->rx_mcast_packets);
6857         stats->collisions = old_stats->collisions +
6858                 get_stat64(&hw_stats->tx_collisions);
6859
6860         stats->rx_length_errors = old_stats->rx_length_errors +
6861                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6862                 get_stat64(&hw_stats->rx_undersize_packets);
6863
6864         stats->rx_over_errors = old_stats->rx_over_errors +
6865                 get_stat64(&hw_stats->rxbds_empty);
6866         stats->rx_frame_errors = old_stats->rx_frame_errors +
6867                 get_stat64(&hw_stats->rx_align_errors);
6868         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6869                 get_stat64(&hw_stats->tx_discards);
6870         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6871                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6872
6873         stats->rx_crc_errors = old_stats->rx_crc_errors +
6874                 calc_crc_errors(tp);
6875
6876         return stats;
6877 }
6878
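/*
 * Standard bit-reflected CRC-32 (polynomial 0xedb88320), processed one
 * byte at a time, LSB first.  It is used below to derive the multicast
 * hash filter bit for each address.
 */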
6879 static inline u32 calc_crc(unsigned char *buf, int len)
6880 {
6881         u32 reg;
6882         u32 tmp;
6883         int j, k;
6884
6885         reg = 0xffffffff;
6886
6887         for (j = 0; j < len; j++) {
6888                 reg ^= buf[j];
6889
6890                 for (k = 0; k < 8; k++) {
6891                         tmp = reg & 0x01;
6892
6893                         reg >>= 1;
6894
6895                         if (tmp) {
6896                                 reg ^= 0xedb88320;
6897                         }
6898                 }
6899         }
6900
6901         return ~reg;
6902 }
6903
6904 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6905 {
6906         /* accept or reject all multicast frames */
6907         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6908         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6909         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6910         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6911 }
6912
6913 static void __tg3_set_rx_mode(struct net_device *dev)
6914 {
6915         struct tg3 *tp = netdev_priv(dev);
6916         u32 rx_mode;
6917
6918         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6919                                   RX_MODE_KEEP_VLAN_TAG);
6920
6921         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6922          * flag clear.
6923          */
6924 #if TG3_VLAN_TAG_USED
6925         if (!tp->vlgrp &&
6926             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6927                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6928 #else
6929         /* By definition, VLAN is always disabled in this
6930          * case.
6931          */
6932         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6933                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6934 #endif
6935
6936         if (dev->flags & IFF_PROMISC) {
6937                 /* Promiscuous mode. */
6938                 rx_mode |= RX_MODE_PROMISC;
6939         } else if (dev->flags & IFF_ALLMULTI) {
6940                 /* Accept all multicast. */
6941                 tg3_set_multi(tp, 1);
6942         } else if (dev->mc_count < 1) {
6943                 /* Reject all multicast. */
6944                 tg3_set_multi(tp, 0);
6945         } else {
6946                 /* Accept one or more multicast(s). */
6947                 struct dev_mc_list *mclist;
6948                 unsigned int i;
6949                 u32 mc_filter[4] = { 0, };
6950                 u32 regidx;
6951                 u32 bit;
6952                 u32 crc;
6953
6954                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6955                      i++, mclist = mclist->next) {
6956
6957                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6958                         bit = ~crc & 0x7f;
6959                         regidx = (bit & 0x60) >> 5;
6960                         bit &= 0x1f;
6961                         mc_filter[regidx] |= (1 << bit);
6962                 }
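                /*
                 * The hash bin is the complement of the low 7 bits of
                 * the CRC: bits 6:5 pick one of the four 32-bit
                 * MAC_HASH_REG_n registers and bits 4:0 pick the bit
                 * within it.  For example, ~crc & 0x7f == 0x4a selects
                 * bit 10 of MAC_HASH_REG_2.
                 */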
6963
6964                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6965                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6966                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6967                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6968         }
6969
6970         if (rx_mode != tp->rx_mode) {
6971                 tp->rx_mode = rx_mode;
6972                 tw32_f(MAC_RX_MODE, rx_mode);
6973                 udelay(10);
6974         }
6975 }
6976
6977 static void tg3_set_rx_mode(struct net_device *dev)
6978 {
6979         struct tg3 *tp = netdev_priv(dev);
6980
6981         tg3_full_lock(tp, 0);
6982         __tg3_set_rx_mode(dev);
6983         tg3_full_unlock(tp);
6984 }
6985
6986 #define TG3_REGDUMP_LEN         (32 * 1024)
6987
6988 static int tg3_get_regs_len(struct net_device *dev)
6989 {
6990         return TG3_REGDUMP_LEN;
6991 }
6992
6993 static void tg3_get_regs(struct net_device *dev,
6994                 struct ethtool_regs *regs, void *_p)
6995 {
6996         u32 *p = _p;
6997         struct tg3 *tp = netdev_priv(dev);
6998         u8 *orig_p = _p;
6999         int i;
7000
7001         regs->version = 0;
7002
7003         memset(p, 0, TG3_REGDUMP_LEN);
7004
7005         tg3_full_lock(tp, 0);
7006
7007 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7008 #define GET_REG32_LOOP(base,len)                \
7009 do {    p = (u32 *)(orig_p + (base));           \
7010         for (i = 0; i < len; i += 4)            \
7011                 __GET_REG32((base) + i);        \
7012 } while (0)
7013 #define GET_REG32_1(reg)                        \
7014 do {    p = (u32 *)(orig_p + (reg));            \
7015         __GET_REG32((reg));                     \
7016 } while (0)
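        /*
         * Each block of registers is copied to its own offset within
         * the dump: GET_REG32_LOOP(base, len) repositions the output
         * pointer to orig_p + base and then stores tr32(base + i) for
         * i = 0, 4, ..., len - 4.  Regions that are never read stay
         * zero from the memset() above, so the 32k buffer mirrors the
         * register address map.
         */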
7017
7018         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7019         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7020         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7021         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7022         GET_REG32_1(SNDDATAC_MODE);
7023         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7024         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7025         GET_REG32_1(SNDBDC_MODE);
7026         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7027         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7028         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7029         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7030         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7031         GET_REG32_1(RCVDCC_MODE);
7032         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7033         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7034         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7035         GET_REG32_1(MBFREE_MODE);
7036         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7037         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7038         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7039         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7040         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7041         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7042         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7043         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7044         GET_REG32_LOOP(FTQ_RESET, 0x120);
7045         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7046         GET_REG32_1(DMAC_MODE);
7047         GET_REG32_LOOP(GRC_MODE, 0x4c);
7048         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7049                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7050
7051 #undef __GET_REG32
7052 #undef GET_REG32_LOOP
7053 #undef GET_REG32_1
7054
7055         tg3_full_unlock(tp);
7056 }
7057
7058 static int tg3_get_eeprom_len(struct net_device *dev)
7059 {
7060         struct tg3 *tp = netdev_priv(dev);
7061
7062         return tp->nvram_size;
7063 }
7064
7065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7066
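/*
 * NVRAM is read 32 bits at a time, so an arbitrary (offset, len)
 * request is split into an unaligned head, a run of whole words and an
 * unaligned tail.  For example, offset=1 len=10 reads the word at 0
 * and keeps bytes 1-3, reads the word at 4 whole, then reads the word
 * at 8 and keeps its first 3 bytes.
 */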
7067 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7068 {
7069         struct tg3 *tp = netdev_priv(dev);
7070         int ret;
7071         u8  *pd;
7072         u32 i, offset, len, val, b_offset, b_count;
7073
7074         offset = eeprom->offset;
7075         len = eeprom->len;
7076         eeprom->len = 0;
7077
7078         eeprom->magic = TG3_EEPROM_MAGIC;
7079
7080         if (offset & 3) {
7081                 /* adjustments to start on required 4 byte boundary */
7082                 b_offset = offset & 3;
7083                 b_count = 4 - b_offset;
7084                 if (b_count > len) {
7085                         /* i.e. offset=1 len=2 */
7086                         b_count = len;
7087                 }
7088                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7089                 if (ret)
7090                         return ret;
7091                 val = cpu_to_le32(val);
7092                 memcpy(data, ((char*)&val) + b_offset, b_count);
7093                 len -= b_count;
7094                 offset += b_count;
7095                 eeprom->len += b_count;
7096         }
7097
7098         /* read bytes up to the last 4 byte boundary */
7099         pd = &data[eeprom->len];
7100         for (i = 0; i < (len - (len & 3)); i += 4) {
7101                 ret = tg3_nvram_read(tp, offset + i, &val);
7102                 if (ret) {
7103                         eeprom->len += i;
7104                         return ret;
7105                 }
7106                 val = cpu_to_le32(val);
7107                 memcpy(pd + i, &val, 4);
7108         }
7109         eeprom->len += i;
7110
7111         if (len & 3) {
7112                 /* read last bytes not ending on 4 byte boundary */
7113                 pd = &data[eeprom->len];
7114                 b_count = len & 3;
7115                 b_offset = offset + len - b_count;
7116                 ret = tg3_nvram_read(tp, b_offset, &val);
7117                 if (ret)
7118                         return ret;
7119                 val = cpu_to_le32(val);
7120                 memcpy(pd, ((char*)&val), b_count);
7121                 eeprom->len += b_count;
7122         }
7123         return 0;
7124 }
7125
7126 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7127
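/*
 * Writes are also done in whole 32-bit words.  If the request does not
 * start or end on a word boundary, the bordering words are read first
 * and merged with the caller's data in a temporary buffer, so the
 * untouched bytes in those words are preserved by the block write.
 */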
7128 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7129 {
7130         struct tg3 *tp = netdev_priv(dev);
7131         int ret;
7132         u32 offset, len, b_offset, odd_len, start, end;
7133         u8 *buf;
7134
7135         if (eeprom->magic != TG3_EEPROM_MAGIC)
7136                 return -EINVAL;
7137
7138         offset = eeprom->offset;
7139         len = eeprom->len;
7140
7141         if ((b_offset = (offset & 3))) {
7142                 /* adjustments to start on required 4 byte boundary */
7143                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7144                 if (ret)
7145                         return ret;
7146                 start = cpu_to_le32(start);
7147                 len += b_offset;
7148                 offset &= ~3;
7149                 if (len < 4)
7150                         len = 4;
7151         }
7152
7153         odd_len = 0;
7154         if (len & 3) {
7155                 /* adjustments to end on required 4 byte boundary */
7156                 odd_len = 1;
7157                 len = (len + 3) & ~3;
7158                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7159                 if (ret)
7160                         return ret;
7161                 end = cpu_to_le32(end);
7162         }
7163
7164         buf = data;
7165         if (b_offset || odd_len) {
7166                 buf = kmalloc(len, GFP_KERNEL);
7167                 if (!buf)
7168                         return -ENOMEM;
7169                 if (b_offset)
7170                         memcpy(buf, &start, 4);
7171                 if (odd_len)
7172                         memcpy(buf+len-4, &end, 4);
7173                 memcpy(buf + b_offset, data, eeprom->len);
7174         }
7175
7176         ret = tg3_nvram_write_block(tp, offset, len, buf);
7177
7178         if (buf != data)
7179                 kfree(buf);
7180
7181         return ret;
7182 }
7183
7184 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7185 {
7186         struct tg3 *tp = netdev_priv(dev);
7187   
7188         cmd->supported = (SUPPORTED_Autoneg);
7189
7190         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7191                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7192                                    SUPPORTED_1000baseT_Full);
7193
7194         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7195                 cmd->supported |= (SUPPORTED_100baseT_Half |
7196                                   SUPPORTED_100baseT_Full |
7197                                   SUPPORTED_10baseT_Half |
7198                                   SUPPORTED_10baseT_Full |
7199                                   SUPPORTED_MII);
7200         else
7201                 cmd->supported |= SUPPORTED_FIBRE;
7202   
7203         cmd->advertising = tp->link_config.advertising;
7204         if (netif_running(dev)) {
7205                 cmd->speed = tp->link_config.active_speed;
7206                 cmd->duplex = tp->link_config.active_duplex;
7207         }
7208         cmd->port = 0;
7209         cmd->phy_address = PHY_ADDR;
7210         cmd->transceiver = 0;
7211         cmd->autoneg = tp->link_config.autoneg;
7212         cmd->maxtxpkt = 0;
7213         cmd->maxrxpkt = 0;
7214         return 0;
7215 }
7216   
7217 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7218 {
7219         struct tg3 *tp = netdev_priv(dev);
7220   
7221         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7222                 /* These are the only valid advertisement bits allowed.  */
7223                 if (cmd->autoneg == AUTONEG_ENABLE &&
7224                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7225                                           ADVERTISED_1000baseT_Full |
7226                                           ADVERTISED_Autoneg |
7227                                           ADVERTISED_FIBRE)))
7228                         return -EINVAL;
7229         }
7230
7231         tg3_full_lock(tp, 0);
7232
7233         tp->link_config.autoneg = cmd->autoneg;
7234         if (cmd->autoneg == AUTONEG_ENABLE) {
7235                 tp->link_config.advertising = cmd->advertising;
7236                 tp->link_config.speed = SPEED_INVALID;
7237                 tp->link_config.duplex = DUPLEX_INVALID;
7238         } else {
7239                 tp->link_config.advertising = 0;
7240                 tp->link_config.speed = cmd->speed;
7241                 tp->link_config.duplex = cmd->duplex;
7242         }
7243   
7244         if (netif_running(dev))
7245                 tg3_setup_phy(tp, 1);
7246
7247         tg3_full_unlock(tp);
7248   
7249         return 0;
7250 }
7251   
7252 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7253 {
7254         struct tg3 *tp = netdev_priv(dev);
7255   
7256         strcpy(info->driver, DRV_MODULE_NAME);
7257         strcpy(info->version, DRV_MODULE_VERSION);
7258         strcpy(info->bus_info, pci_name(tp->pdev));
7259 }
7260   
7261 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7262 {
7263         struct tg3 *tp = netdev_priv(dev);
7264   
7265         wol->supported = WAKE_MAGIC;
7266         wol->wolopts = 0;
7267         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7268                 wol->wolopts = WAKE_MAGIC;
7269         memset(&wol->sopass, 0, sizeof(wol->sopass));
7270 }
7271   
7272 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7273 {
7274         struct tg3 *tp = netdev_priv(dev);
7275   
7276         if (wol->wolopts & ~WAKE_MAGIC)
7277                 return -EINVAL;
7278         if ((wol->wolopts & WAKE_MAGIC) &&
7279             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7280             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7281                 return -EINVAL;
7282   
7283         spin_lock_bh(&tp->lock);
7284         if (wol->wolopts & WAKE_MAGIC)
7285                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7286         else
7287                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7288         spin_unlock_bh(&tp->lock);
7289   
7290         return 0;
7291 }
7292   
7293 static u32 tg3_get_msglevel(struct net_device *dev)
7294 {
7295         struct tg3 *tp = netdev_priv(dev);
7296         return tp->msg_enable;
7297 }
7298   
7299 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7300 {
7301         struct tg3 *tp = netdev_priv(dev);
7302         tp->msg_enable = value;
7303 }
7304   
7305 #if TG3_TSO_SUPPORT != 0
7306 static int tg3_set_tso(struct net_device *dev, u32 value)
7307 {
7308         struct tg3 *tp = netdev_priv(dev);
7309
7310         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7311                 if (value)
7312                         return -EINVAL;
7313                 return 0;
7314         }
7315         return ethtool_op_set_tso(dev, value);
7316 }
7317 #endif
7318   
7319 static int tg3_nway_reset(struct net_device *dev)
7320 {
7321         struct tg3 *tp = netdev_priv(dev);
7322         u32 bmcr;
7323         int r;
7324   
7325         if (!netif_running(dev))
7326                 return -EAGAIN;
7327
7328         spin_lock_bh(&tp->lock);
7329         r = -EINVAL;
7330         tg3_readphy(tp, MII_BMCR, &bmcr);
7331         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7332             (bmcr & BMCR_ANENABLE)) {
7333                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7334                 r = 0;
7335         }
7336         spin_unlock_bh(&tp->lock);
7337   
7338         return r;
7339 }
7340   
7341 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7342 {
7343         struct tg3 *tp = netdev_priv(dev);
7344   
7345         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7346         ering->rx_mini_max_pending = 0;
7347         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7348
7349         ering->rx_pending = tp->rx_pending;
7350         ering->rx_mini_pending = 0;
7351         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7352         ering->tx_pending = tp->tx_pending;
7353 }
7354   
7355 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7356 {
7357         struct tg3 *tp = netdev_priv(dev);
7358         int irq_sync = 0;
7359   
7360         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7361             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7362             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7363                 return -EINVAL;
7364   
7365         if (netif_running(dev)) {
7366                 tg3_netif_stop(tp);
7367                 irq_sync = 1;
7368         }
7369
7370         tg3_full_lock(tp, irq_sync);
7371   
7372         tp->rx_pending = ering->rx_pending;
7373
7374         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7375             tp->rx_pending > 63)
7376                 tp->rx_pending = 63;
7377         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7378         tp->tx_pending = ering->tx_pending;
7379
7380         if (netif_running(dev)) {
7381                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7382                 tg3_init_hw(tp);
7383                 tg3_netif_start(tp);
7384         }
7385
7386         tg3_full_unlock(tp);
7387   
7388         return 0;
7389 }
7390   
7391 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7392 {
7393         struct tg3 *tp = netdev_priv(dev);
7394   
7395         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7396         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7397         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7398 }
7399   
7400 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7401 {
7402         struct tg3 *tp = netdev_priv(dev);
7403         int irq_sync = 0;
7404   
7405         if (netif_running(dev)) {
7406                 tg3_netif_stop(tp);
7407                 irq_sync = 1;
7408         }
7409
7410         tg3_full_lock(tp, irq_sync);
7411
7412         if (epause->autoneg)
7413                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7414         else
7415                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7416         if (epause->rx_pause)
7417                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7418         else
7419                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7420         if (epause->tx_pause)
7421                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7422         else
7423                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7424
7425         if (netif_running(dev)) {
7426                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7427                 tg3_init_hw(tp);
7428                 tg3_netif_start(tp);
7429         }
7430
7431         tg3_full_unlock(tp);
7432   
7433         return 0;
7434 }
7435   
7436 static u32 tg3_get_rx_csum(struct net_device *dev)
7437 {
7438         struct tg3 *tp = netdev_priv(dev);
7439         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7440 }
7441   
7442 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7443 {
7444         struct tg3 *tp = netdev_priv(dev);
7445   
7446         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7447                 if (data != 0)
7448                         return -EINVAL;
7449                 return 0;
7450         }
7451   
7452         spin_lock_bh(&tp->lock);
7453         if (data)
7454                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7455         else
7456                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7457         spin_unlock_bh(&tp->lock);
7458   
7459         return 0;
7460 }
7461   
7462 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7463 {
7464         struct tg3 *tp = netdev_priv(dev);
7465   
7466         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7467                 if (data != 0)
7468                         return -EINVAL;
7469                 return 0;
7470         }
7471   
7472         if (data)
7473                 dev->features |= NETIF_F_IP_CSUM;
7474         else
7475                 dev->features &= ~NETIF_F_IP_CSUM;
7476
7477         return 0;
7478 }
7479
7480 static int tg3_get_stats_count (struct net_device *dev)
7481 {
7482         return TG3_NUM_STATS;
7483 }
7484
7485 static int tg3_get_test_count (struct net_device *dev)
7486 {
7487         return TG3_NUM_TEST;
7488 }
7489
7490 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7491 {
7492         switch (stringset) {
7493         case ETH_SS_STATS:
7494                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7495                 break;
7496         case ETH_SS_TEST:
7497                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7498                 break;
7499         default:
7500                 WARN_ON(1);     /* we need a WARN() */
7501                 break;
7502         }
7503 }
7504
7505 static void tg3_get_ethtool_stats (struct net_device *dev,
7506                                    struct ethtool_stats *estats, u64 *tmp_stats)
7507 {
7508         struct tg3 *tp = netdev_priv(dev);
7509         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7510 }
7511
7512 #define NVRAM_TEST_SIZE 0x100
7513
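/*
 * The NVRAM self-test only looks at the first 256 bytes: the magic
 * word at offset 0, a CRC over bytes 0x00-0x0f stored at offset 0x10,
 * and a CRC over the 0x88-byte manufacturing block at 0x74 stored at
 * offset 0xfc.
 */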
7514 static int tg3_test_nvram(struct tg3 *tp)
7515 {
7516         u32 *buf, csum;
7517         int i, j, err = 0;
7518
7519         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7520         if (buf == NULL)
7521                 return -ENOMEM;
7522
7523         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7524                 u32 val;
7525
7526                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7527                         break;
7528                 buf[j] = cpu_to_le32(val);
7529         }
7530         if (i < NVRAM_TEST_SIZE)
7531                 goto out;
7532
7533         err = -EIO;
7534         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7535                 goto out;
7536
7537         /* Bootstrap checksum at offset 0x10 */
7538         csum = calc_crc((unsigned char *) buf, 0x10);
7539         if (csum != cpu_to_le32(buf[0x10/4]))
7540                 goto out;
7541
7542         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7543         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7544         if (csum != cpu_to_le32(buf[0xfc/4]))
7545                 goto out;
7546
7547         err = 0;
7548
7549 out:
7550         kfree(buf);
7551         return err;
7552 }
7553
7554 #define TG3_SERDES_TIMEOUT_SEC  2
7555 #define TG3_COPPER_TIMEOUT_SEC  6
7556
7557 static int tg3_test_link(struct tg3 *tp)
7558 {
7559         int i, max;
7560
7561         if (!netif_running(tp->dev))
7562                 return -ENODEV;
7563
7564         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7565                 max = TG3_SERDES_TIMEOUT_SEC;
7566         else
7567                 max = TG3_COPPER_TIMEOUT_SEC;
7568
7569         for (i = 0; i < max; i++) {
7570                 if (netif_carrier_ok(tp->dev))
7571                         return 0;
7572
7573                 if (msleep_interruptible(1000))
7574                         break;
7575         }
7576
7577         return -EIO;
7578 }
7579
7580 /* Only test the commonly used registers */
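/*
 * Each reg_tbl[] entry below pairs a read_mask (read-only bits that
 * must keep their value) with a write_mask (bits that must accept both
 * an all-zeros and an all-ones write).  For example, the 5705 entry
 * { MAC_MODE, TG3_FL_5705, 0x00000000, 0x01ef6b8c } has no read-only
 * bits, so the test expects to read back 0 and then 0x01ef6b8c in the
 * masked bits after the two writes.
 */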
7581 static int tg3_test_registers(struct tg3 *tp)
7582 {
7583         int i, is_5705;
7584         u32 offset, read_mask, write_mask, val, save_val, read_val;
7585         static struct {
7586                 u16 offset;
7587                 u16 flags;
7588 #define TG3_FL_5705     0x1
7589 #define TG3_FL_NOT_5705 0x2
7590 #define TG3_FL_NOT_5788 0x4
7591                 u32 read_mask;
7592                 u32 write_mask;
7593         } reg_tbl[] = {
7594                 /* MAC Control Registers */
7595                 { MAC_MODE, TG3_FL_NOT_5705,
7596                         0x00000000, 0x00ef6f8c },
7597                 { MAC_MODE, TG3_FL_5705,
7598                         0x00000000, 0x01ef6b8c },
7599                 { MAC_STATUS, TG3_FL_NOT_5705,
7600                         0x03800107, 0x00000000 },
7601                 { MAC_STATUS, TG3_FL_5705,
7602                         0x03800100, 0x00000000 },
7603                 { MAC_ADDR_0_HIGH, 0x0000,
7604                         0x00000000, 0x0000ffff },
7605                 { MAC_ADDR_0_LOW, 0x0000,
7606                         0x00000000, 0xffffffff },
7607                 { MAC_RX_MTU_SIZE, 0x0000,
7608                         0x00000000, 0x0000ffff },
7609                 { MAC_TX_MODE, 0x0000,
7610                         0x00000000, 0x00000070 },
7611                 { MAC_TX_LENGTHS, 0x0000,
7612                         0x00000000, 0x00003fff },
7613                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7614                         0x00000000, 0x000007fc },
7615                 { MAC_RX_MODE, TG3_FL_5705,
7616                         0x00000000, 0x000007dc },
7617                 { MAC_HASH_REG_0, 0x0000,
7618                         0x00000000, 0xffffffff },
7619                 { MAC_HASH_REG_1, 0x0000,
7620                         0x00000000, 0xffffffff },
7621                 { MAC_HASH_REG_2, 0x0000,
7622                         0x00000000, 0xffffffff },
7623                 { MAC_HASH_REG_3, 0x0000,
7624                         0x00000000, 0xffffffff },
7625
7626                 /* Receive Data and Receive BD Initiator Control Registers. */
7627                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7628                         0x00000000, 0xffffffff },
7629                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7630                         0x00000000, 0xffffffff },
7631                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7632                         0x00000000, 0x00000003 },
7633                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7634                         0x00000000, 0xffffffff },
7635                 { RCVDBDI_STD_BD+0, 0x0000,
7636                         0x00000000, 0xffffffff },
7637                 { RCVDBDI_STD_BD+4, 0x0000,
7638                         0x00000000, 0xffffffff },
7639                 { RCVDBDI_STD_BD+8, 0x0000,
7640                         0x00000000, 0xffff0002 },
7641                 { RCVDBDI_STD_BD+0xc, 0x0000,
7642                         0x00000000, 0xffffffff },
7643         
7644                 /* Receive BD Initiator Control Registers. */
7645                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7646                         0x00000000, 0xffffffff },
7647                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7648                         0x00000000, 0x000003ff },
7649                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7650                         0x00000000, 0xffffffff },
7651         
7652                 /* Host Coalescing Control Registers. */
7653                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7654                         0x00000000, 0x00000004 },
7655                 { HOSTCC_MODE, TG3_FL_5705,
7656                         0x00000000, 0x000000f6 },
7657                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7658                         0x00000000, 0xffffffff },
7659                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7660                         0x00000000, 0x000003ff },
7661                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7662                         0x00000000, 0xffffffff },
7663                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7664                         0x00000000, 0x000003ff },
7665                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7666                         0x00000000, 0xffffffff },
7667                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7668                         0x00000000, 0x000000ff },
7669                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7670                         0x00000000, 0xffffffff },
7671                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7672                         0x00000000, 0x000000ff },
7673                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7674                         0x00000000, 0xffffffff },
7675                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7676                         0x00000000, 0xffffffff },
7677                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7678                         0x00000000, 0xffffffff },
7679                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7680                         0x00000000, 0x000000ff },
7681                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7682                         0x00000000, 0xffffffff },
7683                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7684                         0x00000000, 0x000000ff },
7685                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7686                         0x00000000, 0xffffffff },
7687                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7688                         0x00000000, 0xffffffff },
7689                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7690                         0x00000000, 0xffffffff },
7691                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7692                         0x00000000, 0xffffffff },
7693                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7694                         0x00000000, 0xffffffff },
7695                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7696                         0xffffffff, 0x00000000 },
7697                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7698                         0xffffffff, 0x00000000 },
7699
7700                 /* Buffer Manager Control Registers. */
7701                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7702                         0x00000000, 0x007fff80 },
7703                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7704                         0x00000000, 0x007fffff },
7705                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7706                         0x00000000, 0x0000003f },
7707                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7708                         0x00000000, 0x000001ff },
7709                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7710                         0x00000000, 0x000001ff },
7711                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7712                         0xffffffff, 0x00000000 },
7713                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7714                         0xffffffff, 0x00000000 },
7715         
7716                 /* Mailbox Registers */
7717                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7718                         0x00000000, 0x000001ff },
7719                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7720                         0x00000000, 0x000001ff },
7721                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7722                         0x00000000, 0x000007ff },
7723                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7724                         0x00000000, 0x000001ff },
7725
7726                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7727         };
7728
7729         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7730                 is_5705 = 1;
7731         else
7732                 is_5705 = 0;
7733
7734         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7735                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7736                         continue;
7737
7738                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7739                         continue;
7740
7741                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7742                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7743                         continue;
7744
7745                 offset = (u32) reg_tbl[i].offset;
7746                 read_mask = reg_tbl[i].read_mask;
7747                 write_mask = reg_tbl[i].write_mask;
7748
7749                 /* Save the original register content */
7750                 save_val = tr32(offset);
7751
7752                 /* Determine the read-only value. */
7753                 read_val = save_val & read_mask;
7754
7755                 /* Write zero to the register, then make sure the read-only bits
7756                  * are not changed and the read/write bits are all zeros.
7757                  */
7758                 tw32(offset, 0);
7759
7760                 val = tr32(offset);
7761
7762                 /* Test the read-only and read/write bits. */
7763                 if (((val & read_mask) != read_val) || (val & write_mask))
7764                         goto out;
7765
7766                 /* Write ones to all the bits defined by RdMask and WrMask, then
7767                  * make sure the read-only bits are not changed and the
7768                  * read/write bits are all ones.
7769                  */
7770                 tw32(offset, read_mask | write_mask);
7771
7772                 val = tr32(offset);
7773
7774                 /* Test the read-only bits. */
7775                 if ((val & read_mask) != read_val)
7776                         goto out;
7777
7778                 /* Test the read/write bits. */
7779                 if ((val & write_mask) != write_mask)
7780                         goto out;
7781
7782                 tw32(offset, save_val);
7783         }
7784
7785         return 0;
7786
7787 out:
7788         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7789         tw32(offset, save_val);
7790         return -EIO;
7791 }
7792
7793 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7794 {
7795         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7796         int i;
7797         u32 j;
7798
7799         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7800                 for (j = 0; j < len; j += 4) {
7801                         u32 val;
7802
7803                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7804                         tg3_read_mem(tp, offset + j, &val);
7805                         if (val != test_pattern[i])
7806                                 return -EIO;
7807                 }
7808         }
7809         return 0;
7810 }
7811
7812 static int tg3_test_memory(struct tg3 *tp)
7813 {
7814         static struct mem_entry {
7815                 u32 offset;
7816                 u32 len;
7817         } mem_tbl_570x[] = {
7818                 { 0x00000000, 0x01000},
7819                 { 0x00002000, 0x1c000},
7820                 { 0xffffffff, 0x00000}
7821         }, mem_tbl_5705[] = {
7822                 { 0x00000100, 0x0000c},
7823                 { 0x00000200, 0x00008},
7824                 { 0x00000b50, 0x00400},
7825                 { 0x00004000, 0x00800},
7826                 { 0x00006000, 0x01000},
7827                 { 0x00008000, 0x02000},
7828                 { 0x00010000, 0x0e000},
7829                 { 0xffffffff, 0x00000}
7830         };
7831         struct mem_entry *mem_tbl;
7832         int err = 0;
7833         int i;
7834
7835         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7836                 mem_tbl = mem_tbl_5705;
7837         else
7838                 mem_tbl = mem_tbl_570x;
7839
7840         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7841                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7842                     mem_tbl[i].len)) != 0)
7843                         break;
7844         }
7845
7846         return err;
7847 }
7848
7849 static int tg3_test_loopback(struct tg3 *tp)
7850 {
7851         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7852         u32 desc_idx;
7853         struct sk_buff *skb, *rx_skb;
7854         u8 *tx_data;
7855         dma_addr_t map;
7856         int num_pkts, tx_len, rx_len, i, err;
7857         struct tg3_rx_buffer_desc *desc;
7858
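             /* Internal MAC loopback self-test: put the MAC in GMII internal
              * loopback, transmit one 1514-byte frame on the first send ring,
              * then poll the status block and verify that the frame comes
              * back on the standard RX ring with the expected payload.
              */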
7859         if (!netif_running(tp->dev))
7860                 return -ENODEV;
7861
7862         err = -EIO;
7863
7864         tg3_reset_hw(tp);
7865
7866         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7867                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7868                    MAC_MODE_PORT_MODE_GMII;
7869         tw32(MAC_MODE, mac_mode);
7870
7871         tx_len = 1514;
7872         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
7873         tx_data = skb_put(skb, tx_len);
7874         memcpy(tx_data, tp->dev->dev_addr, 6);
7875         memset(tx_data + 6, 0x0, 8);
7876
7877         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7878
7879         for (i = 14; i < tx_len; i++)
7880                 tx_data[i] = (u8) (i & 0xff);
7881
7882         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7883
7884         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7885              HOSTCC_MODE_NOW);
7886
7887         udelay(10);
7888
7889         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7890
7891         send_idx = 0;
7892         num_pkts = 0;
7893
7894         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7895
7896         send_idx++;
7897         num_pkts++;
7898
7899         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7900         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7901
7902         udelay(10);
7903
7904         for (i = 0; i < 10; i++) {
7905                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7906                        HOSTCC_MODE_NOW);
7907
7908                 udelay(10);
7909
7910                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7911                 rx_idx = tp->hw_status->idx[0].rx_producer;
7912                 if ((tx_idx == send_idx) &&
7913                     (rx_idx == (rx_start_idx + num_pkts)))
7914                         break;
7915         }
7916
7917         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7918         dev_kfree_skb(skb);
7919
7920         if (tx_idx != send_idx)
7921                 goto out;
7922
7923         if (rx_idx != rx_start_idx + num_pkts)
7924                 goto out;
7925
7926         desc = &tp->rx_rcb[rx_start_idx];
7927         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7928         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7929         if (opaque_key != RXD_OPAQUE_RING_STD)
7930                 goto out;
7931
7932         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7933             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7934                 goto out;
7935
7936         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7937         if (rx_len != tx_len)
7938                 goto out;
7939
7940         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7941
7942         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7943         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7944
7945         for (i = 14; i < tx_len; i++) {
7946                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7947                         goto out;
7948         }
7949         err = 0;
7950
7951         /* tg3_free_rings will unmap and free the rx_skb */
7952 out:
7953         return err;
7954 }
7955
7956 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7957                           u64 *data)
7958 {
7959         struct tg3 *tp = netdev_priv(dev);
7960
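             /* One u64 result slot per test: data[0] = nvram, data[1] = link,
              * data[2] = registers, data[3] = memory, data[4] = loopback,
              * data[5] = interrupt.  Non-zero means that test failed.
              */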
7961         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7962
7963         if (tg3_test_nvram(tp) != 0) {
7964                 etest->flags |= ETH_TEST_FL_FAILED;
7965                 data[0] = 1;
7966         }
7967         if (tg3_test_link(tp) != 0) {
7968                 etest->flags |= ETH_TEST_FL_FAILED;
7969                 data[1] = 1;
7970         }
7971         if (etest->flags & ETH_TEST_FL_OFFLINE) {
7972                 int irq_sync = 0;
7973
7974                 if (netif_running(dev)) {
7975                         tg3_netif_stop(tp);
7976                         irq_sync = 1;
7977                 }
7978
7979                 tg3_full_lock(tp, irq_sync);
7980
7981                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7982                 tg3_nvram_lock(tp);
7983                 tg3_halt_cpu(tp, RX_CPU_BASE);
7984                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7985                         tg3_halt_cpu(tp, TX_CPU_BASE);
7986                 tg3_nvram_unlock(tp);
7987
7988                 if (tg3_test_registers(tp) != 0) {
7989                         etest->flags |= ETH_TEST_FL_FAILED;
7990                         data[2] = 1;
7991                 }
7992                 if (tg3_test_memory(tp) != 0) {
7993                         etest->flags |= ETH_TEST_FL_FAILED;
7994                         data[3] = 1;
7995                 }
7996                 if (tg3_test_loopback(tp) != 0) {
7997                         etest->flags |= ETH_TEST_FL_FAILED;
7998                         data[4] = 1;
7999                 }
8000
8001                 tg3_full_unlock(tp);
8002
8003                 if (tg3_test_interrupt(tp) != 0) {
8004                         etest->flags |= ETH_TEST_FL_FAILED;
8005                         data[5] = 1;
8006                 }
8007
8008                 tg3_full_lock(tp, 0);
8009
8010                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8011                 if (netif_running(dev)) {
8012                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8013                         tg3_init_hw(tp);
8014                         tg3_netif_start(tp);
8015                 }
8016
8017                 tg3_full_unlock(tp);
8018         }
8019 }
8020
8021 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8022 {
8023         struct mii_ioctl_data *data = if_mii(ifr);
8024         struct tg3 *tp = netdev_priv(dev);
8025         int err;
8026
8027         switch(cmd) {
8028         case SIOCGMIIPHY:
8029                 data->phy_id = PHY_ADDR;
8030
8031                 /* fallthru */
8032         case SIOCGMIIREG: {
8033                 u32 mii_regval;
8034
8035                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8036                         break;                  /* We have no PHY */
8037
8038                 spin_lock_bh(&tp->lock);
8039                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8040                 spin_unlock_bh(&tp->lock);
8041
8042                 data->val_out = mii_regval;
8043
8044                 return err;
8045         }
8046
8047         case SIOCSMIIREG:
8048                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8049                         break;                  /* We have no PHY */
8050
8051                 if (!capable(CAP_NET_ADMIN))
8052                         return -EPERM;
8053
8054                 spin_lock_bh(&tp->lock);
8055                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8056                 spin_unlock_bh(&tp->lock);
8057
8058                 return err;
8059
8060         default:
8061                 /* do nothing */
8062                 break;
8063         }
8064         return -EOPNOTSUPP;
8065 }
8066
8067 #if TG3_VLAN_TAG_USED
8068 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8069 {
8070         struct tg3 *tp = netdev_priv(dev);
8071
8072         tg3_full_lock(tp, 0);
8073
8074         tp->vlgrp = grp;
8075
8076         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8077         __tg3_set_rx_mode(dev);
8078
8079         tg3_full_unlock(tp);
8080 }
8081
8082 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8083 {
8084         struct tg3 *tp = netdev_priv(dev);
8085
8086         tg3_full_lock(tp, 0);
8087         if (tp->vlgrp)
8088                 tp->vlgrp->vlan_devices[vid] = NULL;
8089         tg3_full_unlock(tp);
8090 }
8091 #endif
8092
8093 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8094 {
8095         struct tg3 *tp = netdev_priv(dev);
8096
8097         memcpy(ec, &tp->coal, sizeof(*ec));
8098         return 0;
8099 }
8100
8101 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8102 {
8103         struct tg3 *tp = netdev_priv(dev);
8104         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8105         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8106
8107         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8108                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8109                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8110                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8111                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8112         }
8113
8114         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8115             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8116             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8117             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8118             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8119             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8120             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8121             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8122             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8123             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8124                 return -EINVAL;
8125
8126         /* No rx interrupts will be generated if both are zero */
8127         if ((ec->rx_coalesce_usecs == 0) &&
8128             (ec->rx_max_coalesced_frames == 0))
8129                 return -EINVAL;
8130
8131         /* No tx interrupts will be generated if both are zero */
8132         if ((ec->tx_coalesce_usecs == 0) &&
8133             (ec->tx_max_coalesced_frames == 0))
8134                 return -EINVAL;
8135
8136         /* Only copy relevant parameters, ignore all others. */
8137         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8138         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8139         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8140         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8141         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8142         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8143         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8144         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8145         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8146
8147         if (netif_running(dev)) {
8148                 tg3_full_lock(tp, 0);
8149                 __tg3_set_coalesce(tp, &tp->coal);
8150                 tg3_full_unlock(tp);
8151         }
8152         return 0;
8153 }
8154
8155 static struct ethtool_ops tg3_ethtool_ops = {
8156         .get_settings           = tg3_get_settings,
8157         .set_settings           = tg3_set_settings,
8158         .get_drvinfo            = tg3_get_drvinfo,
8159         .get_regs_len           = tg3_get_regs_len,
8160         .get_regs               = tg3_get_regs,
8161         .get_wol                = tg3_get_wol,
8162         .set_wol                = tg3_set_wol,
8163         .get_msglevel           = tg3_get_msglevel,
8164         .set_msglevel           = tg3_set_msglevel,
8165         .nway_reset             = tg3_nway_reset,
8166         .get_link               = ethtool_op_get_link,
8167         .get_eeprom_len         = tg3_get_eeprom_len,
8168         .get_eeprom             = tg3_get_eeprom,
8169         .set_eeprom             = tg3_set_eeprom,
8170         .get_ringparam          = tg3_get_ringparam,
8171         .set_ringparam          = tg3_set_ringparam,
8172         .get_pauseparam         = tg3_get_pauseparam,
8173         .set_pauseparam         = tg3_set_pauseparam,
8174         .get_rx_csum            = tg3_get_rx_csum,
8175         .set_rx_csum            = tg3_set_rx_csum,
8176         .get_tx_csum            = ethtool_op_get_tx_csum,
8177         .set_tx_csum            = tg3_set_tx_csum,
8178         .get_sg                 = ethtool_op_get_sg,
8179         .set_sg                 = ethtool_op_set_sg,
8180 #if TG3_TSO_SUPPORT != 0
8181         .get_tso                = ethtool_op_get_tso,
8182         .set_tso                = tg3_set_tso,
8183 #endif
8184         .self_test_count        = tg3_get_test_count,
8185         .self_test              = tg3_self_test,
8186         .get_strings            = tg3_get_strings,
8187         .get_stats_count        = tg3_get_stats_count,
8188         .get_ethtool_stats      = tg3_get_ethtool_stats,
8189         .get_coalesce           = tg3_get_coalesce,
8190         .set_coalesce           = tg3_set_coalesce,
8191 };
8192
8193 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8194 {
8195         u32 cursize, val;
8196
8197         tp->nvram_size = EEPROM_CHIP_SIZE;
8198
8199         if (tg3_nvram_read(tp, 0, &val) != 0)
8200                 return;
8201
8202         if (swab32(val) != TG3_EEPROM_MAGIC)
8203                 return;
8204
8205         /*
8206          * Size the chip by reading offsets at increasing powers of two.
8207          * When we encounter our validation signature, we know the addressing
8208          * has wrapped around, and thus have our chip size.
8209          */
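             /* Worked example, assuming a hypothetical 16 KB part: reads at
              * 0x800, 0x1000 and 0x2000 return ordinary data, while the read
              * at 0x4000 wraps back to offset 0 and returns the magic value,
              * so the loop exits with cursize (and thus nvram_size) = 0x4000.
              */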
8210         cursize = 0x800;
8211
8212         while (cursize < tp->nvram_size) {
8213                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8214                         return;
8215
8216                 if (swab32(val) == TG3_EEPROM_MAGIC)
8217                         break;
8218
8219                 cursize <<= 1;
8220         }
8221
8222         tp->nvram_size = cursize;
8223 }
8224
8225 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8226 {
8227         u32 val;
8228
8229         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8230                 if (val != 0) {
8231                         tp->nvram_size = (val >> 16) * 1024;
8232                         return;
8233                 }
8234         }
8235         tp->nvram_size = 0x20000;
8236 }
8237
8238 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8239 {
8240         u32 nvcfg1;
8241
8242         nvcfg1 = tr32(NVRAM_CFG1);
8243         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8244                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8245         }
8246         else {
8247                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8248                 tw32(NVRAM_CFG1, nvcfg1);
8249         }
8250
8251         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8252                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8253                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8254                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8255                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8256                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8257                                 break;
8258                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8259                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8260                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8261                                 break;
8262                         case FLASH_VENDOR_ATMEL_EEPROM:
8263                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8264                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8265                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8266                                 break;
8267                         case FLASH_VENDOR_ST:
8268                                 tp->nvram_jedecnum = JEDEC_ST;
8269                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8270                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8271                                 break;
8272                         case FLASH_VENDOR_SAIFUN:
8273                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8274                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8275                                 break;
8276                         case FLASH_VENDOR_SST_SMALL:
8277                         case FLASH_VENDOR_SST_LARGE:
8278                                 tp->nvram_jedecnum = JEDEC_SST;
8279                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8280                                 break;
8281                 }
8282         }
8283         else {
8284                 tp->nvram_jedecnum = JEDEC_ATMEL;
8285                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8286                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8287         }
8288 }
8289
8290 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8291 {
8292         u32 nvcfg1;
8293
8294         nvcfg1 = tr32(NVRAM_CFG1);
8295
8296         /* NVRAM protection for TPM */
8297         if (nvcfg1 & (1 << 27))
8298                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8299
8300         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8301                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8302                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8303                         tp->nvram_jedecnum = JEDEC_ATMEL;
8304                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8305                         break;
8306                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8307                         tp->nvram_jedecnum = JEDEC_ATMEL;
8308                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8309                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8310                         break;
8311                 case FLASH_5752VENDOR_ST_M45PE10:
8312                 case FLASH_5752VENDOR_ST_M45PE20:
8313                 case FLASH_5752VENDOR_ST_M45PE40:
8314                         tp->nvram_jedecnum = JEDEC_ST;
8315                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8316                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8317                         break;
8318         }
8319
8320         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8321                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8322                         case FLASH_5752PAGE_SIZE_256:
8323                                 tp->nvram_pagesize = 256;
8324                                 break;
8325                         case FLASH_5752PAGE_SIZE_512:
8326                                 tp->nvram_pagesize = 512;
8327                                 break;
8328                         case FLASH_5752PAGE_SIZE_1K:
8329                                 tp->nvram_pagesize = 1024;
8330                                 break;
8331                         case FLASH_5752PAGE_SIZE_2K:
8332                                 tp->nvram_pagesize = 2048;
8333                                 break;
8334                         case FLASH_5752PAGE_SIZE_4K:
8335                                 tp->nvram_pagesize = 4096;
8336                                 break;
8337                         case FLASH_5752PAGE_SIZE_264:
8338                                 tp->nvram_pagesize = 264;
8339                                 break;
8340                 }
8341         }
8342         else {
8343                 /* For eeprom, set pagesize to maximum eeprom size */
8344                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8345
8346                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8347                 tw32(NVRAM_CFG1, nvcfg1);
8348         }
8349 }
8350
8351 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8352 static void __devinit tg3_nvram_init(struct tg3 *tp)
8353 {
8354         int j;
8355
8356         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8357                 return;
8358
8359         tw32_f(GRC_EEPROM_ADDR,
8360              (EEPROM_ADDR_FSM_RESET |
8361               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8362                EEPROM_ADDR_CLKPERD_SHIFT)));
8363
8364         /* XXX schedule_timeout() ... */
8365         for (j = 0; j < 100; j++)
8366                 udelay(10);
8367
8368         /* Enable seeprom accesses. */
8369         tw32_f(GRC_LOCAL_CTRL,
8370              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8371         udelay(100);
8372
8373         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8374             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8375                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8376
8377                 tg3_enable_nvram_access(tp);
8378
8379                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8380                         tg3_get_5752_nvram_info(tp);
8381                 else
8382                         tg3_get_nvram_info(tp);
8383
8384                 tg3_get_nvram_size(tp);
8385
8386                 tg3_disable_nvram_access(tp);
8387
8388         } else {
8389                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8390
8391                 tg3_get_eeprom_size(tp);
8392         }
8393 }
8394
8395 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8396                                         u32 offset, u32 *val)
8397 {
8398         u32 tmp;
8399         int i;
8400
8401         if (offset > EEPROM_ADDR_ADDR_MASK ||
8402             (offset % 4) != 0)
8403                 return -EINVAL;
8404
8405         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8406                                         EEPROM_ADDR_DEVID_MASK |
8407                                         EEPROM_ADDR_READ);
8408         tw32(GRC_EEPROM_ADDR,
8409              tmp |
8410              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8411              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8412               EEPROM_ADDR_ADDR_MASK) |
8413              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8414
8415         for (i = 0; i < 10000; i++) {
8416                 tmp = tr32(GRC_EEPROM_ADDR);
8417
8418                 if (tmp & EEPROM_ADDR_COMPLETE)
8419                         break;
8420                 udelay(100);
8421         }
8422         if (!(tmp & EEPROM_ADDR_COMPLETE))
8423                 return -EBUSY;
8424
8425         *val = tr32(GRC_EEPROM_DATA);
8426         return 0;
8427 }
8428
8429 #define NVRAM_CMD_TIMEOUT 10000
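     /* 10,000 polls of 10 usec each in tg3_nvram_exec_cmd() below, i.e.
      * roughly a 100 msec worst-case wait for NVRAM_CMD_DONE.
      */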
8430
8431 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8432 {
8433         int i;
8434
8435         tw32(NVRAM_CMD, nvram_cmd);
8436         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8437                 udelay(10);
8438                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8439                         udelay(10);
8440                         break;
8441                 }
8442         }
8443         if (i == NVRAM_CMD_TIMEOUT) {
8444                 return -EBUSY;
8445         }
8446         return 0;
8447 }
8448
8449 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8450 {
8451         int ret;
8452
8453         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8454                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8455                 return -EINVAL;
8456         }
8457
8458         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8459                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8460
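             /* Buffered Atmel (AT45DB-style) flash is not linearly addressed:
              * the part is addressed as
              * (page index << ATMEL_AT45DB0X1B_PAGE_POS) + offset within the
              * page, so convert the linear offset before programming
              * NVRAM_ADDR.
              */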
8461         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8462                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8463                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8464
8465                 offset = ((offset / tp->nvram_pagesize) <<
8466                           ATMEL_AT45DB0X1B_PAGE_POS) +
8467                         (offset % tp->nvram_pagesize);
8468         }
8469
8470         if (offset > NVRAM_ADDR_MSK)
8471                 return -EINVAL;
8472
8473         tg3_nvram_lock(tp);
8474
8475         tg3_enable_nvram_access(tp);
8476
8477         tw32(NVRAM_ADDR, offset);
8478         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8479                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8480
8481         if (ret == 0)
8482                 *val = swab32(tr32(NVRAM_RDDATA));
8483
8484         tg3_nvram_unlock(tp);
8485
8486         tg3_disable_nvram_access(tp);
8487
8488         return ret;
8489 }
8490
8491 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8492                                     u32 offset, u32 len, u8 *buf)
8493 {
8494         int i, j, rc = 0;
8495         u32 val;
8496
8497         for (i = 0; i < len; i += 4) {
8498                 u32 addr, data;
8499
8500                 addr = offset + i;
8501
8502                 memcpy(&data, buf + i, 4);
8503
8504                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8505
8506                 val = tr32(GRC_EEPROM_ADDR);
8507                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8508
8509                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8510                         EEPROM_ADDR_READ);
8511                 tw32(GRC_EEPROM_ADDR, val |
8512                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8513                         (addr & EEPROM_ADDR_ADDR_MASK) |
8514                         EEPROM_ADDR_START |
8515                         EEPROM_ADDR_WRITE);
8516
8517                 for (j = 0; j < 10000; j++) {
8518                         val = tr32(GRC_EEPROM_ADDR);
8519
8520                         if (val & EEPROM_ADDR_COMPLETE)
8521                                 break;
8522                         udelay(100);
8523                 }
8524                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8525                         rc = -EBUSY;
8526                         break;
8527                 }
8528         }
8529
8530         return rc;
8531 }
8532
8533 /* offset and length are dword aligned */
8534 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8535                 u8 *buf)
8536 {
8537         int ret = 0;
8538         u32 pagesize = tp->nvram_pagesize;
8539         u32 pagemask = pagesize - 1;
8540         u32 nvram_cmd;
8541         u8 *tmp;
8542
8543         tmp = kmalloc(pagesize, GFP_KERNEL);
8544         if (tmp == NULL)
8545                 return -ENOMEM;
8546
8547         while (len) {
8548                 int j;
8549                 u32 phy_addr, page_off, size;
8550
8551                 phy_addr = offset & ~pagemask;
8552
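                     /* Read the whole target page into tmp first so that the
                      * words we are not rewriting survive the erase below.
                      */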
8553                 for (j = 0; j < pagesize; j += 4) {
8554                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8555                                                 (u32 *) (tmp + j))))
8556                                 break;
8557                 }
8558                 if (ret)
8559                         break;
8560
8561                 page_off = offset & pagemask;
8562                 size = pagesize;
8563                 if (len < size)
8564                         size = len;
8565
8566                 len -= size;
8567
8568                 memcpy(tmp + page_off, buf, size);
8569
8570                 offset = offset + (pagesize - page_off);
8571
8572                 tg3_enable_nvram_access(tp);
8573
8574                 /*
8575                  * Before we can erase the flash page, we need
8576                  * to issue a special "write enable" command.
8577                  */
8578                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8579
8580                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8581                         break;
8582
8583                 /* Erase the target page */
8584                 tw32(NVRAM_ADDR, phy_addr);
8585
8586                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8587                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8588
8589                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8590                         break;
8591
8592                 /* Issue another write enable to start the write. */
8593                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8594
8595                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8596                         break;
8597
8598                 for (j = 0; j < pagesize; j += 4) {
8599                         u32 data;
8600
8601                         data = *((u32 *) (tmp + j));
8602                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8603
8604                         tw32(NVRAM_ADDR, phy_addr + j);
8605
8606                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8607                                 NVRAM_CMD_WR;
8608
8609                         if (j == 0)
8610                                 nvram_cmd |= NVRAM_CMD_FIRST;
8611                         else if (j == (pagesize - 4))
8612                                 nvram_cmd |= NVRAM_CMD_LAST;
8613
8614                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8615                                 break;
8616                 }
8617                 if (ret)
8618                         break;
8619         }
8620
8621         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8622         tg3_nvram_exec_cmd(tp, nvram_cmd);
8623
8624         kfree(tmp);
8625
8626         return ret;
8627 }
8628
8629 /* offset and length are dword aligned */
8630 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8631                 u8 *buf)
8632 {
8633         int i, ret = 0;
8634
8635         for (i = 0; i < len; i += 4, offset += 4) {
8636                 u32 data, page_off, phy_addr, nvram_cmd;
8637
8638                 memcpy(&data, buf + i, 4);
8639                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8640
8641                 page_off = offset % tp->nvram_pagesize;
8642
8643                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8644                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8645
8646                         phy_addr = ((offset / tp->nvram_pagesize) <<
8647                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8648                 }
8649                 else {
8650                         phy_addr = offset;
8651                 }
8652
8653                 tw32(NVRAM_ADDR, phy_addr);
8654
8655                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8656
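                     /* Flag flash page boundaries for the controller: FIRST on
                      * the first word of a page (or of the transfer), LAST on
                      * the final word of a page or of the whole transfer.
                      */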
8657                 if ((page_off == 0) || (i == 0))
8658                         nvram_cmd |= NVRAM_CMD_FIRST;
8659                 else if (page_off == (tp->nvram_pagesize - 4))
8660                         nvram_cmd |= NVRAM_CMD_LAST;
8661
8662                 if (i == (len - 4))
8663                         nvram_cmd |= NVRAM_CMD_LAST;
8664
8665                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8666                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8667
8668                         if ((ret = tg3_nvram_exec_cmd(tp,
8669                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8670                                 NVRAM_CMD_DONE)))
8671
8672                                 break;
8673                 }
8674                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8675                         /* We always do complete word writes to eeprom. */
8676                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8677                 }
8678
8679                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8680                         break;
8681         }
8682         return ret;
8683 }
8684
8685 /* offset and length are dword aligned */
8686 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8687 {
8688         int ret;
8689
8690         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8691                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8692                 return -EINVAL;
8693         }
8694
8695         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8696                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8697                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8698                 udelay(40);
8699         }
8700
8701         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8702                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8703         }
8704         else {
8705                 u32 grc_mode;
8706
8707                 tg3_nvram_lock(tp);
8708
8709                 tg3_enable_nvram_access(tp);
8710                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8711                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8712                         tw32(NVRAM_WRITE1, 0x406);
8713
8714                 grc_mode = tr32(GRC_MODE);
8715                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8716
8717                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8718                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8719
8720                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8721                                 buf);
8722                 }
8723                 else {
8724                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8725                                 buf);
8726                 }
8727
8728                 grc_mode = tr32(GRC_MODE);
8729                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8730
8731                 tg3_disable_nvram_access(tp);
8732                 tg3_nvram_unlock(tp);
8733         }
8734
8735         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8736                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8737                 udelay(40);
8738         }
8739
8740         return ret;
8741 }
8742
8743 struct subsys_tbl_ent {
8744         u16 subsys_vendor, subsys_devid;
8745         u32 phy_id;
8746 };
8747
8748 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8749         /* Broadcom boards. */
8750         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8751         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8752         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8753         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8754         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8755         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8756         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8757         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8758         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8759         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8760         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8761
8762         /* 3com boards. */
8763         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8764         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8765         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8766         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8767         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8768
8769         /* DELL boards. */
8770         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8771         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8772         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8773         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8774
8775         /* Compaq boards. */
8776         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8777         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8778         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8779         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8780         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8781
8782         /* IBM boards. */
8783         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8784 };
8785
8786 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8787 {
8788         int i;
8789
8790         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8791                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8792                      tp->pdev->subsystem_vendor) &&
8793                     (subsys_id_to_phy_id[i].subsys_devid ==
8794                      tp->pdev->subsystem_device))
8795                         return &subsys_id_to_phy_id[i];
8796         }
8797         return NULL;
8798 }
8799
8800 /* Since this function may be called in D3-hot power state during
8801  * tg3_init_one(), only config cycles are allowed.
8802  */
8803 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8804 {
8805         u32 val;
8806
8807         /* Make sure register accesses (indirect or otherwise)
8808          * will function correctly.
8809          */
8810         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8811                                tp->misc_host_ctrl);
8812
8813         tp->phy_id = PHY_ID_INVALID;
8814         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8815
8816         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8817         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8818                 u32 nic_cfg, led_cfg;
8819                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8820                 int eeprom_phy_serdes = 0;
8821
8822                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8823                 tp->nic_sram_data_cfg = nic_cfg;
8824
8825                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8826                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8827                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8828                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8829                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8830                     (ver > 0) && (ver < 0x100))
8831                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8832
8833                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8834                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8835                         eeprom_phy_serdes = 1;
8836
8837                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8838                 if (nic_phy_id != 0) {
8839                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8840                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8841
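                             /* Repack the two SRAM PHY ID words into the same
                              * layout that tg3_phy_probe() builds from the
                              * MII_PHYSID1/MII_PHYSID2 registers.
                              */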
8842                         eeprom_phy_id  = (id1 >> 16) << 10;
8843                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8844                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8845                 } else
8846                         eeprom_phy_id = 0;
8847
8848                 tp->phy_id = eeprom_phy_id;
8849                 if (eeprom_phy_serdes) {
8850                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8851                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8852                         else
8853                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8854                 }
8855
8856                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8857                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8858                                     SHASTA_EXT_LED_MODE_MASK);
8859                 else
8860                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8861
8862                 switch (led_cfg) {
8863                 default:
8864                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8865                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8866                         break;
8867
8868                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8869                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8870                         break;
8871
8872                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8873                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8874
8875                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8876                          * read with some older 5700/5701 bootcode.
8877                          */
8878                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8879                             ASIC_REV_5700 ||
8880                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8881                             ASIC_REV_5701)
8882                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8883
8884                         break;
8885
8886                 case SHASTA_EXT_LED_SHARED:
8887                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8888                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8889                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8890                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8891                                                  LED_CTRL_MODE_PHY_2);
8892                         break;
8893
8894                 case SHASTA_EXT_LED_MAC:
8895                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8896                         break;
8897
8898                 case SHASTA_EXT_LED_COMBO:
8899                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8900                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8901                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8902                                                  LED_CTRL_MODE_PHY_2);
8903                         break;
8904
8905                 }
8906
8907                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8908                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8909                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8910                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8911
8912                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8913                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8914                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8915                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8916
8917                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8918                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8919                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8920                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8921                 }
8922                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8923                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8924
8925                 if (cfg2 & (1 << 17))
8926                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8927
8928                 /* Serdes signal pre-emphasis in register 0x590 is set by
8929                  * the bootcode if bit 18 is set. */
8930                 if (cfg2 & (1 << 18))
8931                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8932         }
8933 }
8934
8935 static int __devinit tg3_phy_probe(struct tg3 *tp)
8936 {
8937         u32 hw_phy_id_1, hw_phy_id_2;
8938         u32 hw_phy_id, hw_phy_id_masked;
8939         int err;
8940
8941         /* Reading the PHY ID register can conflict with ASF
8942          * firmware access to the PHY hardware.
8943          */
8944         err = 0;
8945         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8946                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8947         } else {
8948                 /* Now read the physical PHY_ID from the chip and verify
8949                  * that it is sane.  If it doesn't look good, we fall back
8950                  * to the PHY_ID found in the EEPROM area and, failing
8951                  * that, to the hard-coded subsystem-ID table.
8952                  */
8953                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8954                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8955
8956                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8957                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8958                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8959
8960                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8961         }
8962
8963         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8964                 tp->phy_id = hw_phy_id;
8965                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8966                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8967                 else
8968                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
8969         } else {
8970                 if (tp->phy_id != PHY_ID_INVALID) {
8971                         /* Do nothing, phy ID already set up in
8972                          * tg3_get_eeprom_hw_cfg().
8973                          */
8974                 } else {
8975                         struct subsys_tbl_ent *p;
8976
8977                         /* No eeprom signature?  Try the hardcoded
8978                          * subsys device table.
8979                          */
8980                         p = lookup_by_subsys(tp);
8981                         if (!p)
8982                                 return -ENODEV;
8983
8984                         tp->phy_id = p->phy_id;
8985                         if (!tp->phy_id ||
8986                             tp->phy_id == PHY_ID_BCM8002)
8987                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8988                 }
8989         }
8990
8991         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
8992             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8993                 u32 bmsr, adv_reg, tg3_ctrl;
8994
8995                 tg3_readphy(tp, MII_BMSR, &bmsr);
8996                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8997                     (bmsr & BMSR_LSTATUS))
8998                         goto skip_phy_reset;
8999
9000                 err = tg3_phy_reset(tp);
9001                 if (err)
9002                         return err;
9003
9004                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9005                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9006                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9007                 tg3_ctrl = 0;
9008                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9009                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9010                                     MII_TG3_CTRL_ADV_1000_FULL);
9011                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9012                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9013                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9014                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9015                 }
9016
9017                 if (!tg3_copper_is_advertising_all(tp)) {
9018                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9019
9020                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9021                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9022
9023                         tg3_writephy(tp, MII_BMCR,
9024                                      BMCR_ANENABLE | BMCR_ANRESTART);
9025                 }
9026                 tg3_phy_set_wirespeed(tp);
9027
9028                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9029                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9030                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9031         }
9032
9033 skip_phy_reset:
9034         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9035                 err = tg3_init_5401phy_dsp(tp);
9036                 if (err)
9037                         return err;
9038         }
9039
9040         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9041                 err = tg3_init_5401phy_dsp(tp);
9042         }
9043
9044         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9045                 tp->link_config.advertising =
9046                         (ADVERTISED_1000baseT_Half |
9047                          ADVERTISED_1000baseT_Full |
9048                          ADVERTISED_Autoneg |
9049                          ADVERTISED_FIBRE);
9050         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9051                 tp->link_config.advertising &=
9052                         ~(ADVERTISED_1000baseT_Half |
9053                           ADVERTISED_1000baseT_Full);
9054
9055         return err;
9056 }
9057
9058 static void __devinit tg3_read_partno(struct tg3 *tp)
9059 {
9060         unsigned char vpd_data[256];
9061         int i;
9062
9063         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9064                 /* Sun decided not to put the necessary bits in the
9065                  * NVRAM of their onboard tg3 parts :(
9066                  */
9067                 strcpy(tp->board_part_number, "Sun 570X");
9068                 return;
9069         }
9070
9071         for (i = 0; i < 256; i += 4) {
9072                 u32 tmp;
9073
9074                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9075                         goto out_not_found;
9076
9077                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9078                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9079                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9080                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9081         }
9082
9083         /* Now parse and find the part number. */
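             /* Tag 0x82 is the PCI VPD identifier-string descriptor and 0x91
              * the read/write (VPD-W) section; both are skipped using the
              * 16-bit length that follows the tag.  Tag 0x90 is the read-only
              * (VPD-R) section, which is scanned for the "PN" (part number)
              * keyword.
              */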
9084         for (i = 0; i < 256; ) {
9085                 unsigned char val = vpd_data[i];
9086                 int block_end;
9087
9088                 if (val == 0x82 || val == 0x91) {
9089                         i = (i + 3 +
9090                              (vpd_data[i + 1] +
9091                               (vpd_data[i + 2] << 8)));
9092                         continue;
9093                 }
9094
9095                 if (val != 0x90)
9096                         goto out_not_found;
9097
9098                 block_end = (i + 3 +
9099                              (vpd_data[i + 1] +
9100                               (vpd_data[i + 2] << 8)));
9101                 i += 3;
9102                 while (i < block_end) {
9103                         if (vpd_data[i + 0] == 'P' &&
9104                             vpd_data[i + 1] == 'N') {
9105                                 int partno_len = vpd_data[i + 2];
9106
9107                                 if (partno_len > 24)
9108                                         goto out_not_found;
9109
9110                                 memcpy(tp->board_part_number,
9111                                        &vpd_data[i + 3],
9112                                        partno_len);
9113
9114                                 /* Success. */
9115                                 return;
9116                         }

                             /* Advance past this keyword: a 3-byte header
                              * followed by vpd_data[i + 2] data bytes.
                              */
                             i += 3 + vpd_data[i + 2];
9117                 }
9118
9119                 /* Part number not found. */
9120                 goto out_not_found;
9121         }
9122
9123 out_not_found:
9124         strcpy(tp->board_part_number, "none");
9125 }
9126
9127 #ifdef CONFIG_SPARC64
9128 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9129 {
9130         struct pci_dev *pdev = tp->pdev;
9131         struct pcidev_cookie *pcp = pdev->sysdata;
9132
9133         if (pcp != NULL) {
9134                 int node = pcp->prom_node;
9135                 u32 venid;
9136                 int err;
9137
9138                 err = prom_getproperty(node, "subsystem-vendor-id",
9139                                        (char *) &venid, sizeof(venid));
9140                 if (err == 0 || err == -1)
9141                         return 0;
9142                 if (venid == PCI_VENDOR_ID_SUN)
9143                         return 1;
9144         }
9145         return 0;
9146 }
9147 #endif
9148
9149 static int __devinit tg3_get_invariants(struct tg3 *tp)
9150 {
9151         static struct pci_device_id write_reorder_chipsets[] = {
9152                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9153                              PCI_DEVICE_ID_INTEL_82801AA_8) },
9154                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9155                              PCI_DEVICE_ID_INTEL_82801AB_8) },
9156                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9157                              PCI_DEVICE_ID_INTEL_82801BA_11) },
9158                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9159                              PCI_DEVICE_ID_INTEL_82801BA_6) },
9160                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9161                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9162                 { },
9163         };
9164         u32 misc_ctrl_reg;
9165         u32 cacheline_sz_reg;
9166         u32 pci_state_reg, grc_misc_cfg;
9167         u32 val;
9168         u16 pci_cmd;
9169         int err;
9170
9171 #ifdef CONFIG_SPARC64
9172         if (tg3_is_sun_570X(tp))
9173                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9174 #endif
9175
9176         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
9177          * reordering to the mailbox registers done by the host
9178          * controller can cause major troubles.  We read back from
9179          * every mailbox register write to force the writes to be
9180          * posted to the chip in order.
9181          */
9182         if (pci_dev_present(write_reorder_chipsets))
9183                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9184
9185         /* Force memory write invalidate off.  If we leave it on,
9186          * then on 5700_BX chips we have to enable a workaround.
9187          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9188          * to match the cacheline size.  The Broadcom driver has this
9189          * workaround but turns MWI off all the time, so it never
9190          * actually uses it.  This suggests the workaround is insufficient.
9191          */
9192         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9193         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9194         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9195
9196         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9197          * has the register indirect write enable bit set before
9198          * we try to access any of the MMIO registers.  It is also
9199          * critical that the PCI-X hw workaround situation is decided
9200          * before that point.
9201          */
9202         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9203                               &misc_ctrl_reg);
9204
9205         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9206                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9207
9208         /* Wrong chip ID in 5752 A0. This code can be removed later
9209          * as A0 is not in production.
9210          */
9211         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9212                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9213
9214         /* Find msi capability. */
9215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9216                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9217
9218         /* Initialize misc host control in PCI block. */
9219         tp->misc_host_ctrl |= (misc_ctrl_reg &
9220                                MISC_HOST_CTRL_CHIPREV);
9221         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9222                                tp->misc_host_ctrl);
9223
9224         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9225                               &cacheline_sz_reg);
9226
9227         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9228         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9229         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9230         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
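        /* Note: TG3PCI_CACHELINESZ appears to mirror the standard PCI
         * config dword at offset 0x0c, so a single read yields cache
         * line size, latency timer, header type and BIST status in
         * that byte order.
         */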
9231
9232         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9235                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9236
9237         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9238             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9239                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9240
9241         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9242                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9243
9244         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9245             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9246             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9247                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9248
9249         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9250                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9251
9252         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9253             tp->pci_lat_timer < 64) {
9254                 tp->pci_lat_timer = 64;
9255
9256                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9257                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9258                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9259                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9260
9261                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9262                                        cacheline_sz_reg);
9263         }
9264
9265         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9266                               &pci_state_reg);
9267
9268         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9269                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9270
9271                 /* If this is a 5700 BX chipset, and we are in PCI-X
9272                  * mode, enable register write workaround.
9273                  *
9274                  * The workaround is to use indirect register accesses
9275                  * for all chip writes not to mailbox registers.
9276                  */
9277                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9278                         u32 pm_reg;
9279                         u16 pci_cmd;
9280
9281                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9282
9283                         /* The chip can have its power management PCI config
9284                          * space registers clobbered due to this bug.
9285                          * So explicitly force the chip into D0 here.
9286                          */
9287                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9288                                               &pm_reg);
9289                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9290                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9291                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9292                                                pm_reg);
9293
9294                         /* Also, force SERR#/PERR# in PCI command. */
9295                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9296                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9297                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9298                 }
9299         }
9300
9301         /* Back to back register writes can cause problems on this chip,
9302          * the workaround is to read back all reg writes except those to
9303          * mailbox regs.  See tg3_write_indirect_reg32().
9304          *
9305          * PCI Express 5750_A0 rev chips need this workaround too.
9306          */
9307         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9308             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9309              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9310                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9311
9312         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9313                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9314         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9315                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9316
9317         /* Chip-specific fixup from Broadcom driver */
9318         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9319             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9320                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9321                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9322         }
9323
9324         /* Default fast path register access methods */
9325         tp->read32 = tg3_read32;
9326         tp->write32 = tg3_write32;
9327         tp->read32_mbox = tg3_read32;
9328         tp->write32_mbox = tg3_write32;
9329         tp->write32_tx_mbox = tg3_write32;
9330         tp->write32_rx_mbox = tg3_write32;
9331
9332         /* Various workaround register access methods */
9333         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9334                 tp->write32 = tg3_write_indirect_reg32;
9335         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9336                 tp->write32 = tg3_write_flush_reg32;
9337
9338         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9339             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9340                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9341                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9342                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9343         }
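        /* Summary of the methods selected above, going by the comments
         * in this function: tg3_write_flush_reg32 reads the register
         * back so posted writes cannot be reordered by the host bridge,
         * tg3_write_indirect_reg32 goes through PCI config space for
         * the PCI-X target hw bug, and tg3_write32_tx_mbox presumably
         * implements the double TX producer index write described for
         * 5700 BX parts further down.
         */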
9344
9345         /* Get eeprom hw config before calling tg3_set_power_state().
9346          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9347          * determined before calling tg3_set_power_state() so that
9348          * we know whether or not to switch out of Vaux power.
9349          * When the flag is set, it means that GPIO1 is used for eeprom
9350          * write protect and also implies that it is a LOM where GPIOs
9351          * are not used to switch power.
9352          */ 
9353         tg3_get_eeprom_hw_cfg(tp);
9354
9355         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9356          * GPIO1 driven high will bring 5700's external PHY out of reset.
9357          * It is also used as eeprom write protect on LOMs.
9358          */
9359         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9360         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9361             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9362                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9363                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9364         /* Unused GPIO3 must be driven as output on 5752 because there
9365          * are no pull-up resistors on unused GPIO pins.
9366          */
9367         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9368                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9369
9370         /* Force the chip into D0. */
9371         err = tg3_set_power_state(tp, 0);
9372         if (err) {
9373                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9374                        pci_name(tp->pdev));
9375                 return err;
9376         }
9377
9378         /* 5700 B0 chips do not support checksumming correctly due
9379          * to hardware bugs.
9380          */
9381         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9382                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9383
9384         /* Pseudo-header checksum is done by hardware logic and not
9385          * the offload processors, so make the chip do the pseudo-
9386          * header checksums on receive.  For transmit it is more
9387          * convenient to do the pseudo-header checksum in software
9388          * as Linux does that on transmit for us in all cases.
9389          */
9390         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9391         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9392
9393         /* Derive initial jumbo mode from MTU assigned in
9394          * ether_setup() via the alloc_etherdev() call
9395          */
9396         if (tp->dev->mtu > ETH_DATA_LEN &&
9397             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9398                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9399
9400         /* Determine WakeOnLan speed to use. */
9401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9402             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9403             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9404             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9405                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9406         } else {
9407                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9408         }
9409
9410         /* A few boards don't want Ethernet@WireSpeed phy feature */
9411         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9412             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9413              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9414              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9415             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9416                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9417
9418         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9419             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9420                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9421         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9422                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9423
9424         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9425                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9426
9427         tp->coalesce_mode = 0;
9428         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9429             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9430                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9431
9432         /* Initialize MAC MI mode, polling disabled. */
9433         tw32_f(MAC_MI_MODE, tp->mi_mode);
9434         udelay(80);
9435
9436         /* Initialize data/descriptor byte/word swapping. */
9437         val = tr32(GRC_MODE);
9438         val &= GRC_MODE_HOST_STACKUP;
9439         tw32(GRC_MODE, val | tp->grc_mode);
9440
9441         tg3_switch_clocks(tp);
9442
9443         /* Clear this out for sanity. */
9444         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9445
9446         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9447                               &pci_state_reg);
9448         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9449             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9450                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9451
9452                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9453                     chiprevid == CHIPREV_ID_5701_B0 ||
9454                     chiprevid == CHIPREV_ID_5701_B2 ||
9455                     chiprevid == CHIPREV_ID_5701_B5) {
9456                         void __iomem *sram_base;
9457
9458                         /* Write some dummy words into the SRAM status block
9459                          * area, see if it reads back correctly.  If the return
9460                          * value is bad, force enable the PCIX workaround.
9461                          */
9462                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9463
9464                         writel(0x00000000, sram_base);
9465                         writel(0x00000000, sram_base + 4);
9466                         writel(0xffffffff, sram_base + 4);
9467                         if (readl(sram_base) != 0x00000000)
9468                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9469                 }
9470         }
9471
9472         udelay(50);
9473         tg3_nvram_init(tp);
9474
9475         grc_misc_cfg = tr32(GRC_MISC_CFG);
9476         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9477
9478         /* Broadcom's driver says that CIOBE multisplit has a bug */
9479 #if 0
9480         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9481             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9482                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9483                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9484         }
9485 #endif
9486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9487             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9488              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9489                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9490
9491         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9492             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9493                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9494         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9495                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9496                                       HOSTCC_MODE_CLRTICK_TXBD);
9497
9498                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9499                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9500                                        tp->misc_host_ctrl);
9501         }
9502
9503         /* these are limited to 10/100 only */
9504         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9505              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9506             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9507              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9508              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9509               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9510               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9511             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9512              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9513               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9514                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9515
9516         err = tg3_phy_probe(tp);
9517         if (err) {
9518                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9519                        pci_name(tp->pdev), err);
9520                 /* ... but do not return immediately ... */
9521         }
9522
9523         tg3_read_partno(tp);
9524
9525         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9526                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9527         } else {
9528                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9529                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9530                 else
9531                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9532         }
9533
9534         /* 5700 {AX,BX} chips have a broken status block link
9535          * change bit implementation, so we must use the
9536          * status register in those cases.
9537          */
9538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9539                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9540         else
9541                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9542
9543         /* The led_ctrl is set during tg3_phy_probe; here we might
9544          * have to force the link status polling mechanism based
9545          * upon subsystem IDs.
9546          */
9547         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9548             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9549                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9550                                   TG3_FLAG_USE_LINKCHG_REG);
9551         }
9552
9553         /* For all SERDES we poll the MAC status register. */
9554         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9555                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9556         else
9557                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9558
9559         /* 5700 BX chips need to have their TX producer index mailboxes
9560          * written twice to workaround a bug.
9561          */
9562         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9563                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9564         else
9565                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9566
9567         /* It seems all chips can get confused if TX buffers straddle
9568          * the 4GB address boundary in some cases; tg3_start_xmit()
9569          * is expected to detect and work around that case.  */
9570         tp->dev->hard_start_xmit = tg3_start_xmit;
9571
9572         tp->rx_offset = 2;
9573         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9574             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9575                 tp->rx_offset = 0;
9576
9577         /* By default, disable wake-on-lan.  User can change this
9578          * using ETHTOOL_SWOL.
9579          */
9580         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9581
9582         return err;
9583 }
9584
9585 #ifdef CONFIG_SPARC64
9586 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9587 {
9588         struct net_device *dev = tp->dev;
9589         struct pci_dev *pdev = tp->pdev;
9590         struct pcidev_cookie *pcp = pdev->sysdata;
9591
9592         if (pcp != NULL) {
9593                 int node = pcp->prom_node;
9594
9595                 if (prom_getproplen(node, "local-mac-address") == 6) {
9596                         prom_getproperty(node, "local-mac-address",
9597                                          dev->dev_addr, 6);
9598                         return 0;
9599                 }
9600         }
9601         return -ENODEV;
9602 }
9603
9604 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9605 {
9606         struct net_device *dev = tp->dev;
9607
9608         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9609         return 0;
9610 }
9611 #endif
9612
9613 static int __devinit tg3_get_device_address(struct tg3 *tp)
9614 {
9615         struct net_device *dev = tp->dev;
9616         u32 hi, lo, mac_offset;
9617
9618 #ifdef CONFIG_SPARC64
9619         if (!tg3_get_macaddr_sparc(tp))
9620                 return 0;
9621 #endif
9622
9623         mac_offset = 0x7c;
9624         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9625              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9626             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9627                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9628                         mac_offset = 0xcc;
9629                 if (tg3_nvram_lock(tp))
9630                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9631                 else
9632                         tg3_nvram_unlock(tp);
9633         }
9634
9635         /* First try to get it from MAC address mailbox. */
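        /* (0x484b in the upper half is ASCII "HK", apparently the
         * firmware's marker for "this mailbox holds a valid address".)
         */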
9636         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9637         if ((hi >> 16) == 0x484b) {
9638                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9639                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9640
9641                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9642                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9643                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9644                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9645                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9646         }
9647         /* Next, try NVRAM. */
9648         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9649                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9650                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9651                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9652                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9653                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9654                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9655                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9656                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9657         }
9658         /* Finally just fetch it out of the MAC control regs. */
9659         else {
9660                 hi = tr32(MAC_ADDR_0_HIGH);
9661                 lo = tr32(MAC_ADDR_0_LOW);
9662
9663                 dev->dev_addr[5] = lo & 0xff;
9664                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9665                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9666                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9667                 dev->dev_addr[1] = hi & 0xff;
9668                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9669         }
9670
9671         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9672 #ifdef CONFIG_SPARC64
9673                 if (!tg3_get_default_macaddr_sparc(tp))
9674                         return 0;
9675 #endif
9676                 return -EINVAL;
9677         }
9678         return 0;
9679 }
9680
9681 #define BOUNDARY_SINGLE_CACHELINE       1
9682 #define BOUNDARY_MULTI_CACHELINE        2
9683
9684 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9685 {
9686         int cacheline_size;
9687         u8 byte;
9688         int goal;
9689
9690         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9691         if (byte == 0)
9692                 cacheline_size = 1024;
9693         else
9694                 cacheline_size = (int) byte * 4;
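        /* Note: PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence
         * the multiply by 4; a value of zero usually means firmware
         * never programmed it, so assume the 1024-byte worst case.
         */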
9695
9696         /* On 5703 and later chips, the boundary bits have no
9697          * effect.
9698          */
9699         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9700             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9701             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9702                 goto out;
9703
9704 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9705         goal = BOUNDARY_MULTI_CACHELINE;
9706 #else
9707 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9708         goal = BOUNDARY_SINGLE_CACHELINE;
9709 #else
9710         goal = 0;
9711 #endif
9712 #endif
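        /* Rough intent of the per-arch defaults above (a reading of
         * the code, not chip documentation): MULTI_CACHELINE lets DMA
         * bursts span several cache lines, SINGLE_CACHELINE clamps
         * them to one, and 0 leaves the chip's default boundary
         * settings untouched.
         */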
9713
9714         if (!goal)
9715                 goto out;
9716
9717         /* PCI controllers on most RISC systems tend to disconnect
9718          * when a device tries to burst across a cache-line boundary.
9719          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9720          *
9721          * Unfortunately, for PCI-E there are only limited
9722          * write-side controls for this, and thus for reads
9723          * we will still get the disconnects.  We'll also waste
9724          * these PCI cycles for both read and write for chips
9725          * other than 5700 and 5701 which do not implement the
9726          * boundary bits.
9727          */
9728         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9729             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9730                 switch (cacheline_size) {
9731                 case 16:
9732                 case 32:
9733                 case 64:
9734                 case 128:
9735                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9736                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9737                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9738                         } else {
9739                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9740                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9741                         }
9742                         break;
9743
9744                 case 256:
9745                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9746                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9747                         break;
9748
9749                 default:
9750                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9751                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9752                         break;
9753                 }
9754         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9755                 switch (cacheline_size) {
9756                 case 16:
9757                 case 32:
9758                 case 64:
9759                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9760                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9761                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9762                                 break;
9763                         }
9764                         /* fallthrough */
9765                 case 128:
9766                 default:
9767                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9768                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9769                         break;
9770                 }
9771         } else {
9772                 switch (cacheline_size) {
9773                 case 16:
9774                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9775                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9776                                         DMA_RWCTRL_WRITE_BNDRY_16);
9777                                 break;
9778                         }
9779                         /* fallthrough */
9780                 case 32:
9781                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9782                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9783                                         DMA_RWCTRL_WRITE_BNDRY_32);
9784                                 break;
9785                         }
9786                         /* fallthrough */
9787                 case 64:
9788                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9789                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9790                                         DMA_RWCTRL_WRITE_BNDRY_64);
9791                                 break;
9792                         }
9793                         /* fallthrough */
9794                 case 128:
9795                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9796                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9797                                         DMA_RWCTRL_WRITE_BNDRY_128);
9798                                 break;
9799                         }
9800                         /* fallthrough */
9801                 case 256:
9802                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9803                                 DMA_RWCTRL_WRITE_BNDRY_256);
9804                         break;
9805                 case 512:
9806                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9807                                 DMA_RWCTRL_WRITE_BNDRY_512);
9808                         break;
9809                 case 1024:
9810                 default:
9811                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9812                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9813                         break;
9814                 }
9815         }
9816
9817 out:
9818         return val;
9819 }
9820
9821 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9822 {
9823         struct tg3_internal_buffer_desc test_desc;
9824         u32 sram_dma_descs;
9825         int i, ret;
9826
9827         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9828
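        /* Quiesce things first: clear the completion FIFOs and the
         * DMA status registers, and disable the buffer manager and
         * flow-through queues, so the hand-built descriptor below is
         * the only DMA work the chip sees.  (This is an inference
         * from the code, not documented chip behavior.)
         */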
9829         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9830         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9831         tw32(RDMAC_STATUS, 0);
9832         tw32(WDMAC_STATUS, 0);
9833
9834         tw32(BUFMGR_MODE, 0);
9835         tw32(FTQ_RESET, 0);
9836
9837         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9838         test_desc.addr_lo = buf_dma & 0xffffffff;
9839         test_desc.nic_mbuf = 0x00002100;
9840         test_desc.len = size;
9841
9842         /*
9843          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
9844          * the *second* time the tg3 driver was getting loaded after an
9845          * initial scan.
9846          *
9847          * Broadcom tells me:
9848          *   ...the DMA engine is connected to the GRC block and a DMA
9849          *   reset may affect the GRC block in some unpredictable way...
9850          *   The behavior of resets to individual blocks has not been tested.
9851          *
9852          * Broadcom noted the GRC reset will also reset all sub-components.
9853          */
9854         if (to_device) {
9855                 test_desc.cqid_sqid = (13 << 8) | 2;
9856
9857                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9858                 udelay(40);
9859         } else {
9860                 test_desc.cqid_sqid = (16 << 8) | 7;
9861
9862                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9863                 udelay(40);
9864         }
9865         test_desc.flags = 0x00000005;
9866
9867         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9868                 u32 val;
9869
9870                 val = *(((u32 *)&test_desc) + i);
9871                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9872                                        sram_dma_descs + (i * sizeof(u32)));
9873                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9874         }
9875         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
9876
9877         if (to_device) {
9878                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9879         } else {
9880                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
9881         }
9882
9883         ret = -ENODEV;
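        /* Poll the completion FIFO for up to ~4ms (40 iterations of
         * udelay(100)) waiting for our descriptor address to appear,
         * which indicates the test DMA actually completed.
         */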
9884         for (i = 0; i < 40; i++) {
9885                 u32 val;
9886
9887                 if (to_device)
9888                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9889                 else
9890                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9891                 if ((val & 0xffff) == sram_dma_descs) {
9892                         ret = 0;
9893                         break;
9894                 }
9895
9896                 udelay(100);
9897         }
9898
9899         return ret;
9900 }
9901
9902 #define TEST_BUFFER_SIZE        0x2000
9903
9904 static int __devinit tg3_test_dma(struct tg3 *tp)
9905 {
9906         dma_addr_t buf_dma;
9907         u32 *buf, saved_dma_rwctrl;
9908         int ret;
9909
9910         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9911         if (!buf) {
9912                 ret = -ENOMEM;
9913                 goto out_nofree;
9914         }
9915
9916         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9917                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
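        /* These two fields look like the PCI bus command codes the DMA
         * engine should use (0x7 Memory Write, 0x6 Memory Read in the
         * standard PCI encoding); this is an educated guess rather
         * than documented behavior.
         */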
9918
9919         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9920
9921         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9922                 /* DMA read watermark not used on PCIE */
9923                 tp->dma_rwctrl |= 0x00180000;
9924         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9925                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9926                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9927                         tp->dma_rwctrl |= 0x003f0000;
9928                 else
9929                         tp->dma_rwctrl |= 0x003f000f;
9930         } else {
9931                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9932                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9933                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9934
9935                         if (ccval == 0x6 || ccval == 0x7)
9936                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9937
9938                         /* Set bit 23 to enable PCIX hw bug fix */
9939                         tp->dma_rwctrl |= 0x009f0000;
9940                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9941                         /* 5780 always in PCIX mode */
9942                         tp->dma_rwctrl |= 0x00144000;
9943                 } else {
9944                         tp->dma_rwctrl |= 0x001b000f;
9945                 }
9946         }
9947
9948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9950                 tp->dma_rwctrl &= 0xfffffff0;
9951
9952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9954                 /* Remove this if it causes problems for some boards. */
9955                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9956
9957                 /* On 5700/5701 chips, we need to set this bit.
9958                  * Otherwise the chip will issue cacheline transactions
9959                  * to streamable DMA memory without all of the byte
9960                  * enables asserted.  This is an error on several
9961                  * RISC PCI controllers, in particular sparc64.
9962                  *
9963                  * On 5703/5704 chips, this bit has been reassigned
9964                  * a different meaning.  In particular, it is used
9965                  * on those chips to enable a PCI-X workaround.
9966                  */
9967                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9968         }
9969
9970         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9971
9972 #if 0
9973         /* Unneeded, already done by tg3_get_invariants.  */
9974         tg3_switch_clocks(tp);
9975 #endif
9976
9977         ret = 0;
9978         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9979             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9980                 goto out;
9981
9982         /* It is best to perform DMA test with maximum write burst size
9983          * to expose the 5700/5701 write DMA bug.
9984          */
9985         saved_dma_rwctrl = tp->dma_rwctrl;
9986         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9987         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9988
9989         while (1) {
9990                 u32 *p = buf, i;
9991
9992                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9993                         p[i] = i;
9994
9995                 /* Send the buffer to the chip. */
9996                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9997                 if (ret) {
9998                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
9999                         break;
10000                 }
10001
10002 #if 0
10003                 /* validate data reached card RAM correctly. */
10004                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10005                         u32 val;
10006                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10007                         if (le32_to_cpu(val) != p[i]) {
10008                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10009                                 /* ret = -ENODEV here? */
10010                         }
10011                         p[i] = 0;
10012                 }
10013 #endif
10014                 /* Now read it back. */
10015                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10016                 if (ret) {
10017                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
10018
10019                         break;
10020                 }
10021
10022                 /* Verify it. */
10023                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10024                         if (p[i] == i)
10025                                 continue;
10026
10027                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10028                             DMA_RWCTRL_WRITE_BNDRY_16) {
10029                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10030                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10031                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10032                                 break;
10033                         } else {
10034                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10035                                 ret = -ENODEV;
10036                                 goto out;
10037                         }
10038                 }
10039
10040                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10041                         /* Success. */
10042                         ret = 0;
10043                         break;
10044                 }
10045         }
10046         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10047             DMA_RWCTRL_WRITE_BNDRY_16) {
10048                 static struct pci_device_id dma_wait_state_chipsets[] = {
10049                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10050                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10051                         { },
10052                 };
10053
10054                 /* DMA test passed without adjusting DMA boundary,
10055                  * now look for chipsets that are known to expose the
10056                  * DMA bug without failing the test.
10057                  */
10058                 if (pci_dev_present(dma_wait_state_chipsets)) {
10059                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10060                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10061                 }
10062                 else
10063                         /* Safe to use the calculated DMA boundary. */
10064                         tp->dma_rwctrl = saved_dma_rwctrl;
10065
10066                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10067         }
10068
10069 out:
10070         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10071 out_nofree:
10072         return ret;
10073 }
10074
10075 static void __devinit tg3_init_link_config(struct tg3 *tp)
10076 {
10077         tp->link_config.advertising =
10078                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10079                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10080                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10081                  ADVERTISED_Autoneg | ADVERTISED_MII);
10082         tp->link_config.speed = SPEED_INVALID;
10083         tp->link_config.duplex = DUPLEX_INVALID;
10084         tp->link_config.autoneg = AUTONEG_ENABLE;
10085         netif_carrier_off(tp->dev);
10086         tp->link_config.active_speed = SPEED_INVALID;
10087         tp->link_config.active_duplex = DUPLEX_INVALID;
10088         tp->link_config.phy_is_low_power = 0;
10089         tp->link_config.orig_speed = SPEED_INVALID;
10090         tp->link_config.orig_duplex = DUPLEX_INVALID;
10091         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10092 }
10093
10094 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10095 {
10096         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10097                 tp->bufmgr_config.mbuf_read_dma_low_water =
10098                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10099                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10100                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10101                 tp->bufmgr_config.mbuf_high_water =
10102                         DEFAULT_MB_HIGH_WATER_5705;
10103
10104                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10105                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10106                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10107                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10108                 tp->bufmgr_config.mbuf_high_water_jumbo =
10109                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10110         } else {
10111                 tp->bufmgr_config.mbuf_read_dma_low_water =
10112                         DEFAULT_MB_RDMA_LOW_WATER;
10113                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10114                         DEFAULT_MB_MACRX_LOW_WATER;
10115                 tp->bufmgr_config.mbuf_high_water =
10116                         DEFAULT_MB_HIGH_WATER;
10117
10118                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10119                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10120                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10121                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10122                 tp->bufmgr_config.mbuf_high_water_jumbo =
10123                         DEFAULT_MB_HIGH_WATER_JUMBO;
10124         }
10125
10126         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10127         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10128 }
10129
10130 static char * __devinit tg3_phy_string(struct tg3 *tp)
10131 {
10132         switch (tp->phy_id & PHY_ID_MASK) {
10133         case PHY_ID_BCM5400:    return "5400";
10134         case PHY_ID_BCM5401:    return "5401";
10135         case PHY_ID_BCM5411:    return "5411";
10136         case PHY_ID_BCM5701:    return "5701";
10137         case PHY_ID_BCM5703:    return "5703";
10138         case PHY_ID_BCM5704:    return "5704";
10139         case PHY_ID_BCM5705:    return "5705";
10140         case PHY_ID_BCM5750:    return "5750";
10141         case PHY_ID_BCM5752:    return "5752";
10142         case PHY_ID_BCM5780:    return "5780";
10143         case PHY_ID_BCM8002:    return "8002/serdes";
10144         case 0:                 return "serdes";
10145         default:                return "unknown";
10146         }
10147 }
10148
10149 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10150 {
10151         struct pci_dev *peer;
10152         unsigned int func, devnr = tp->pdev->devfn & ~7;
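        /* devfn packs the device number in its upper five bits and the
         * function number in the low three, so masking off the low
         * bits yields function 0 of this slot; the loop then scans the
         * sibling functions for the other port of the dual-MAC 5704.
         */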
10153
10154         for (func = 0; func < 8; func++) {
10155                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10156                 if (peer && peer != tp->pdev)
10157                         break;
10158                 pci_dev_put(peer);
10159         }
10160         if (!peer || peer == tp->pdev)
10161                 BUG();
10162
10163         /*
10164          * We don't need to keep the refcount elevated; there's no way
10165          * to remove one half of this device without removing the other
10166          */
10167         pci_dev_put(peer);
10168
10169         return peer;
10170 }
10171
10172 static void __devinit tg3_init_coal(struct tg3 *tp)
10173 {
10174         struct ethtool_coalesce *ec = &tp->coal;
10175
10176         memset(ec, 0, sizeof(*ec));
10177         ec->cmd = ETHTOOL_GCOALESCE;
10178         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10179         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10180         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10181         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10182         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10183         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10184         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10185         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10186         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10187
10188         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10189                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10190                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10191                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10192                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10193                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10194         }
10195
10196         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10197                 ec->rx_coalesce_usecs_irq = 0;
10198                 ec->tx_coalesce_usecs_irq = 0;
10199                 ec->stats_block_coalesce_usecs = 0;
10200         }
10201 }
10202
10203 static int __devinit tg3_init_one(struct pci_dev *pdev,
10204                                   const struct pci_device_id *ent)
10205 {
10206         static int tg3_version_printed = 0;
10207         unsigned long tg3reg_base, tg3reg_len;
10208         struct net_device *dev;
10209         struct tg3 *tp;
10210         int i, err, pci_using_dac, pm_cap;
10211
10212         if (tg3_version_printed++ == 0)
10213                 printk(KERN_INFO "%s", version);
10214
10215         err = pci_enable_device(pdev);
10216         if (err) {
10217                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10218                        "aborting.\n");
10219                 return err;
10220         }
10221
10222         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10223                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10224                        "base address, aborting.\n");
10225                 err = -ENODEV;
10226                 goto err_out_disable_pdev;
10227         }
10228
10229         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10230         if (err) {
10231                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10232                        "aborting.\n");
10233                 goto err_out_disable_pdev;
10234         }
10235
10236         pci_set_master(pdev);
10237
10238         /* Find power-management capability. */
10239         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10240         if (pm_cap == 0) {
10241                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10242                        "aborting.\n");
10243                 err = -EIO;
10244                 goto err_out_free_res;
10245         }
10246
10247         /* Configure DMA attributes. */
10248         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10249         if (!err) {
10250                 pci_using_dac = 1;
10251                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10252                 if (err < 0) {
10253                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10254                                "for consistent allocations\n");
10255                         goto err_out_free_res;
10256                 }
10257         } else {
10258                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10259                 if (err) {
10260                         printk(KERN_ERR PFX "No usable DMA configuration, "
10261                                "aborting.\n");
10262                         goto err_out_free_res;
10263                 }
10264                 pci_using_dac = 0;
10265         }
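        /* Note: pci_using_dac only records whether the full 64-bit
         * mask was accepted; it gates NETIF_F_HIGHDMA below so the
         * stack hands us highmem buffers only when the device can
         * actually reach them with DMA.
         */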
10266
10267         tg3reg_base = pci_resource_start(pdev, 0);
10268         tg3reg_len = pci_resource_len(pdev, 0);
10269
10270         dev = alloc_etherdev(sizeof(*tp));
10271         if (!dev) {
10272                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10273                 err = -ENOMEM;
10274                 goto err_out_free_res;
10275         }
10276
10277         SET_MODULE_OWNER(dev);
10278         SET_NETDEV_DEV(dev, &pdev->dev);
10279
10280         if (pci_using_dac)
10281                 dev->features |= NETIF_F_HIGHDMA;
10282         dev->features |= NETIF_F_LLTX;
10283 #if TG3_VLAN_TAG_USED
10284         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10285         dev->vlan_rx_register = tg3_vlan_rx_register;
10286         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10287 #endif
10288
10289         tp = netdev_priv(dev);
10290         tp->pdev = pdev;
10291         tp->dev = dev;
10292         tp->pm_cap = pm_cap;
10293         tp->mac_mode = TG3_DEF_MAC_MODE;
10294         tp->rx_mode = TG3_DEF_RX_MODE;
10295         tp->tx_mode = TG3_DEF_TX_MODE;
10296         tp->mi_mode = MAC_MI_MODE_BASE;
10297         if (tg3_debug > 0)
10298                 tp->msg_enable = tg3_debug;
10299         else
10300                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10301
10302         /* The word/byte swap controls here control register access byte
10303          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10304          * setting below.
10305          */
10306         tp->misc_host_ctrl =
10307                 MISC_HOST_CTRL_MASK_PCI_INT |
10308                 MISC_HOST_CTRL_WORD_SWAP |
10309                 MISC_HOST_CTRL_INDIR_ACCESS |
10310                 MISC_HOST_CTRL_PCISTATE_RW;
10311
10312         /* The NONFRM (non-frame) byte/word swap controls take effect
10313          * on descriptor entries, anything which isn't packet data.
10314          *
10315          * The StrongARM chips on the board (one for tx, one for rx)
10316          * are running in big-endian mode.
10317          */
10318         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10319                         GRC_MODE_WSWAP_NONFRM_DATA);
10320 #ifdef __BIG_ENDIAN
10321         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10322 #endif
10323         spin_lock_init(&tp->lock);
10324         spin_lock_init(&tp->tx_lock);
10325         spin_lock_init(&tp->indirect_lock);
10326         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10327
10328         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10329         if (!tp->regs) {
10330                 printk(KERN_ERR PFX "Cannot map device registers, "
10331                        "aborting.\n");
10332                 err = -ENOMEM;
10333                 goto err_out_free_dev;
10334         }
10335
10336         tg3_init_link_config(tp);
10337
10338         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10339         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10340         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10341
10342         dev->open = tg3_open;
10343         dev->stop = tg3_close;
10344         dev->get_stats = tg3_get_stats;
10345         dev->set_multicast_list = tg3_set_rx_mode;
10346         dev->set_mac_address = tg3_set_mac_addr;
10347         dev->do_ioctl = tg3_ioctl;
10348         dev->tx_timeout = tg3_tx_timeout;
10349         dev->poll = tg3_poll;
10350         dev->ethtool_ops = &tg3_ethtool_ops;
10351         dev->weight = 64;
10352         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10353         dev->change_mtu = tg3_change_mtu;
10354         dev->irq = pdev->irq;
10355 #ifdef CONFIG_NET_POLL_CONTROLLER
10356         dev->poll_controller = tg3_poll_controller;
10357 #endif
10358
10359         err = tg3_get_invariants(tp);
10360         if (err) {
10361                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10362                        "aborting.\n");
10363                 goto err_out_iounmap;
10364         }
10365
10366         tg3_init_bufmgr_config(tp);
10367
10368 #if TG3_TSO_SUPPORT != 0
10369         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10370                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10371         }
10372         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10373             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10374             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10375             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10376                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10377         } else {
10378                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10379         }
10380
10381         /* TSO is off by default, user can enable using ethtool.  */
10382 #if 0
10383         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10384                 dev->features |= NETIF_F_TSO;
10385 #endif
10386
10387 #endif
10388
10389         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10390             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10391             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10392                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10393                 tp->rx_pending = 63;
10394         }
10395
10396         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10397                 tp->pdev_peer = tg3_find_5704_peer(tp);
10398
10399         err = tg3_get_device_address(tp);
10400         if (err) {
10401                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10402                        "aborting.\n");
10403                 goto err_out_iounmap;
10404         }
10405
10406         /*
10407          * Reset chip in case UNDI or EFI driver did not shut it down
10408          * cleanly.  Otherwise the DMA self test will enable WDMAC and
10409          * we'll see (spurious) pending DMA on the PCI bus at that point.
10410          */
10411         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10412             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10413                 pci_save_state(tp->pdev);
10414                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10415                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10416         }
10417
10418         err = tg3_test_dma(tp);
10419         if (err) {
10420                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10421                 goto err_out_iounmap;
10422         }
10423
10424         /* Tigon3 can only do IPv4 checksum offload... and some chips
10425          * have buggy checksumming.
10426          */
10427         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10428                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10429                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10430         } else
10431                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10432
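        /* 5788 parts cannot DMA to/from high memory, so do not advertise
         * NETIF_F_HIGHDMA and let the stack bounce such buffers.
         */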
10433         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10434                 dev->features &= ~NETIF_F_HIGHDMA;
10435
10436         /* Flow control autonegotiation is the default behavior. */
10437         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10438
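        /* Install the chip's default interrupt coalescing parameters. */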
10439         tg3_init_coal(tp);
10440
10441         /* Now that we have fully set up the chip, save away a snapshot
10442          * of the PCI config space.  We need to restore this after
10443          * GRC_MISC_CFG core clock resets and some resume events.
10444          */
10445         pci_save_state(tp->pdev);
10446
10447         err = register_netdev(dev);
10448         if (err) {
10449                 printk(KERN_ERR PFX "Cannot register net device, "
10450                        "aborting.\n");
10451                 goto err_out_iounmap;
10452         }
10453
10454         pci_set_drvdata(pdev, dev);
10455
10456         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10457                dev->name,
10458                tp->board_part_number,
10459                tp->pci_chip_rev_id,
10460                tg3_phy_string(tp),
10461                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10462                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10463                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10464                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10465                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10466                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10467
10468         for (i = 0; i < 6; i++)
10469                 printk("%2.2x%c", dev->dev_addr[i],
10470                        i == 5 ? '\n' : ':');
10471
10472         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10473                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10474                "TSOcap[%d]\n",
10475                dev->name,
10476                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10477                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10478                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10479                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10480                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10481                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10482                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10483         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10484                dev->name, tp->dma_rwctrl);
10485
10486         return 0;
10487
10488 err_out_iounmap:
10489         iounmap(tp->regs);
10490
10491 err_out_free_dev:
10492         free_netdev(dev);
10493
10494 err_out_free_res:
10495         pci_release_regions(pdev);
10496
10497 err_out_disable_pdev:
10498         pci_disable_device(pdev);
10499         pci_set_drvdata(pdev, NULL);
10500         return err;
10501 }
10502
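/* Undo everything tg3_init_one() set up, in reverse order: unregister the
 * net device, unmap the registers, free the net_device and release and
 * disable the PCI device.
 */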
10503 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10504 {
10505         struct net_device *dev = pci_get_drvdata(pdev);
10506
10507         if (dev) {
10508                 struct tg3 *tp = netdev_priv(dev);
10509
10510                 unregister_netdev(dev);
10511                 iounmap(tp->regs);
10512                 free_netdev(dev);
10513                 pci_release_regions(pdev);
10514                 pci_disable_device(pdev);
10515                 pci_set_drvdata(pdev, NULL);
10516         }
10517 }
10518
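/* System suspend: quiesce the interface (stop NAPI and the TX queue, kill
 * the timer, mask interrupts), detach it from the stack, halt the chip and
 * then request the target PCI power state.  If that last step fails, the
 * device is brought back up so the interface stays usable.
 */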
10519 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10520 {
10521         struct net_device *dev = pci_get_drvdata(pdev);
10522         struct tg3 *tp = netdev_priv(dev);
10523         int err;
10524
10525         if (!netif_running(dev))
10526                 return 0;
10527
10528         tg3_netif_stop(tp);
10529
10530         del_timer_sync(&tp->timer);
10531
10532         tg3_full_lock(tp, 1);
10533         tg3_disable_ints(tp);
10534         tg3_full_unlock(tp);
10535
10536         netif_device_detach(dev);
10537
10538         tg3_full_lock(tp, 0);
10539         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10540         tg3_full_unlock(tp);
10541
10542         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10543         if (err) {
10544                 tg3_full_lock(tp, 0);
10545
10546                 tg3_init_hw(tp);
10547
10548                 tp->timer.expires = jiffies + tp->timer_offset;
10549                 add_timer(&tp->timer);
10550
10551                 netif_device_attach(dev);
10552                 tg3_netif_start(tp);
10553
10554                 tg3_full_unlock(tp);
10555         }
10556
10557         return err;
10558 }
10559
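/* System resume: restore the saved PCI config space, bring the chip back to
 * full power, reprogram the hardware and restart the timer and the transmit
 * path.
 */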
10560 static int tg3_resume(struct pci_dev *pdev)
10561 {
10562         struct net_device *dev = pci_get_drvdata(pdev);
10563         struct tg3 *tp = netdev_priv(dev);
10564         int err;
10565
10566         if (!netif_running(dev))
10567                 return 0;
10568
10569         pci_restore_state(tp->pdev);
10570
10571         err = tg3_set_power_state(tp, 0);
10572         if (err)
10573                 return err;
10574
10575         netif_device_attach(dev);
10576
10577         tg3_full_lock(tp, 0);
10578
10579         tg3_init_hw(tp);
10580
10581         tp->timer.expires = jiffies + tp->timer_offset;
10582         add_timer(&tp->timer);
10583
10584         tg3_netif_start(tp);
10585
10586         tg3_full_unlock(tp);
10587
10588         return 0;
10589 }
10590
10591 static struct pci_driver tg3_driver = {
10592         .name           = DRV_MODULE_NAME,
10593         .id_table       = tg3_pci_tbl,
10594         .probe          = tg3_init_one,
10595         .remove         = __devexit_p(tg3_remove_one),
10596         .suspend        = tg3_suspend,
10597         .resume         = tg3_resume
10598 };
10599
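/* pci_module_init() is the historical wrapper around pci_register_driver()
 * used by drivers of this era; unloading unregisters the driver directly.
 */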
10600 static int __init tg3_init(void)
10601 {
10602         return pci_module_init(&tg3_driver);
10603 }
10604
10605 static void __exit tg3_cleanup(void)
10606 {
10607         pci_unregister_driver(&tg3_driver);
10608 }
10609
10610 module_init(tg3_init);
10611 module_exit(tg3_cleanup);