[TG3]: Add nvram detection for 5752
[linux-2.6] drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
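/* TX ring bookkeeping: tx_pending is the number of descriptors the driver
 * allows itself to use, so TX_RING_GAP() is the part of the ring left
 * unused.  TX_BUFFS_AVAIL() counts free descriptors between the consumer
 * and producer indices, and NEXT_TX() advances an index with a mask,
 * which works because TG3_TX_RING_SIZE is a power of two (see the note
 * above about avoiding '%').
 */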
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
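/* PCI IDs claimed by this driver.  Every entry matches on vendor and
 * device only; subvendor and subdevice are wildcarded with PCI_ANY_ID.
 */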
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
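/* Statistic names reported for ETHTOOL_GSTATS.  The order here must match
 * the order in which the driver fills in struct tg3_ethtool_stats, since
 * ethtool pairs strings and values strictly by index.
 */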
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
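/* Register write helper.  When the PCI-X target hardware bug workaround
 * is in effect, registers are written indirectly through PCI config space
 * (REG_BASE_ADDR/REG_DATA) under indirect_lock; otherwise a normal MMIO
 * write is used, with a read-back on chips flagged with the 5701
 * register-write bug.
 */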
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
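/* Shorthand register accessors.  tw32() may take the indirect config-space
 * path above, tw32_f() additionally reads the register back to flush the
 * posted write (in the plain MMIO case), and the rx/tx mailbox variants
 * add the read-back and double-write quirks that some chips require.
 */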
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
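/* Access to NIC-internal SRAM goes through a window in PCI config space:
 * point MEM_WIN_BASE_ADDR at the target offset, transfer the word through
 * MEM_WIN_DATA, then reset the base to zero, all under indirect_lock.
 */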
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
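/* Interrupt masking is done in two places: the PCI-level mask bit in
 * MISC_HOST_CTRL and the interrupt mailbox.  The trailing mailbox read
 * flushes the posted write so the change takes effect before returning.
 */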
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  similar to tg3_enable_ints, but it can return without flushing the
431  *  PIO write which reenables interrupts
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
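/* Switch the core clock source.  The sequence below appears to step the
 * chip from its alternate clock back to the normal core clock, keeping
 * only the CLKRUN-related bits, with 40us settling delays between writes.
 */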
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
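/* MII/MDIO access to the PHY.  Hardware auto-polling is temporarily
 * turned off around the transaction, the command is issued through
 * MAC_MI_COM, and the busy bit is polled (up to PHY_BUSY_LOOPS tries)
 * before the result is read back.
 */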
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
585
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit <= 0)
625                 return -EBUSY;
626
627         return 0;
628 }
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit <= 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
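/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back.  A macro timeout sets *resetp so the caller will reset
 * the PHY and retry; a pattern mismatch simply fails with -EBUSY.
 */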
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
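/* PHY reset workaround for the 5703/5704/5705: repeat a BMCR reset until
 * the DSP test pattern check passes (up to 10 tries), then clear the test
 * channels and restore the master/slave setting and the transmitter.
 */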
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 Mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* Reset the tigon3 PHY (unconditionally) and reapply the
811  * chip-specific PHY workarounds that must follow a reset.
812  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips that */
860         /* support jumbo frames */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frame transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
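/* Fiddle the GPIO-controlled auxiliary power.  On the 5704 the two ports
 * share this logic, so the peer device's state is consulted, and the GPIO
 * sequencing differs depending on whether Wake-on-LAN is enabled.
 */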
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
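/* Move the chip into the requested PCI power state (D0-D3).  Along the
 * way this restricts the copper PHY to low speed, optionally arms
 * Wake-on-LAN (magic packet) if enabled, slows or gates the various
 * clocks, and finally writes the PM control register.
 */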
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 udelay(100);    /* Delay after power state change */
1009
1010                 /* Switch out of Vaux if it is not a LOM */
1011                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1012                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1013                         udelay(100);
1014                 }
1015
1016                 return 0;
1017
1018         case 1:
1019                 power_control |= 1;
1020                 break;
1021
1022         case 2:
1023                 power_control |= 2;
1024                 break;
1025
1026         case 3:
1027                 power_control |= 3;
1028                 break;
1029
1030         default:
1031                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1032                        "requested.\n",
1033                        tp->dev->name, state);
1034                 return -EINVAL;
1035         };
1036
1037         power_control |= PCI_PM_CTRL_PME_ENABLE;
1038
1039         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1040         tw32(TG3PCI_MISC_HOST_CTRL,
1041              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1042
1043         if (tp->link_config.phy_is_low_power == 0) {
1044                 tp->link_config.phy_is_low_power = 1;
1045                 tp->link_config.orig_speed = tp->link_config.speed;
1046                 tp->link_config.orig_duplex = tp->link_config.duplex;
1047                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1048         }
1049
1050         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1051                 tp->link_config.speed = SPEED_10;
1052                 tp->link_config.duplex = DUPLEX_HALF;
1053                 tp->link_config.autoneg = AUTONEG_ENABLE;
1054                 tg3_setup_phy(tp, 0);
1055         }
1056
1057         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1058
1059         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1060                 u32 mac_mode;
1061
1062                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1063                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1064                         udelay(40);
1065
1066                         mac_mode = MAC_MODE_PORT_MODE_MII;
1067
1068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1069                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1070                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1071                 } else {
1072                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1073                 }
1074
1075                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1076                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1077
1078                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1079                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1080                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1081
1082                 tw32_f(MAC_MODE, mac_mode);
1083                 udelay(100);
1084
1085                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1086                 udelay(10);
1087         }
1088
1089         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1090             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1092                 u32 base_val;
1093
1094                 base_val = tp->pci_clock_ctrl;
1095                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1096                              CLOCK_CTRL_TXCLK_DISABLE);
1097
1098                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1099                      CLOCK_CTRL_ALTCLK |
1100                      CLOCK_CTRL_PWRDOWN_PLL133);
1101                 udelay(40);
1102         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1103                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1104                 u32 newbits1, newbits2;
1105
1106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1107                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1108                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1109                                     CLOCK_CTRL_TXCLK_DISABLE |
1110                                     CLOCK_CTRL_ALTCLK);
1111                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1112                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1113                         newbits1 = CLOCK_CTRL_625_CORE;
1114                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1115                 } else {
1116                         newbits1 = CLOCK_CTRL_ALTCLK;
1117                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1118                 }
1119
1120                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1121                 udelay(40);
1122
1123                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1124                 udelay(40);
1125
1126                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1127                         u32 newbits3;
1128
1129                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1132                                             CLOCK_CTRL_TXCLK_DISABLE |
1133                                             CLOCK_CTRL_44MHZ_CORE);
1134                         } else {
1135                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1136                         }
1137
1138                         tw32_f(TG3PCI_CLOCK_CTRL,
1139                                          tp->pci_clock_ctrl | newbits3);
1140                         udelay(40);
1141                 }
1142         }
1143
1144         tg3_frob_aux_power(tp);
1145
1146         /* Workaround for unstable PLL clock */
1147         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1148             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1149                 u32 val = tr32(0x7d00);
1150
1151                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1152                 tw32(0x7d00, val);
1153                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1154                         tg3_halt_cpu(tp, RX_CPU_BASE);
1155         }
1156
1157         /* Finally, set the new power state. */
1158         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1159         udelay(100);    /* Delay after power state change */
1160
1161         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1162
1163         return 0;
1164 }
1165
1166 static void tg3_link_report(struct tg3 *tp)
1167 {
1168         if (!netif_carrier_ok(tp->dev)) {
1169                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1170         } else {
1171                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1172                        tp->dev->name,
1173                        (tp->link_config.active_speed == SPEED_1000 ?
1174                         1000 :
1175                         (tp->link_config.active_speed == SPEED_100 ?
1176                          100 : 10)),
1177                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1178                         "full" : "half"));
1179
1180                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1181                        "%s for RX.\n",
1182                        tp->dev->name,
1183                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1184                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1185         }
1186 }
1187
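/* Resolve 802.3x flow control from the local and link-partner
 * advertisement bits (symmetric vs. asymmetric pause) when pause
 * autonegotiation is enabled, then update MAC_RX_MODE/MAC_TX_MODE
 * only if the result actually changed.
 */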
1188 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1189 {
1190         u32 new_tg3_flags = 0;
1191         u32 old_rx_mode = tp->rx_mode;
1192         u32 old_tx_mode = tp->tx_mode;
1193
1194         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1195                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1196                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1197                                 if (remote_adv & LPA_PAUSE_CAP)
1198                                         new_tg3_flags |=
1199                                                 (TG3_FLAG_RX_PAUSE |
1200                                                 TG3_FLAG_TX_PAUSE);
1201                                 else if (remote_adv & LPA_PAUSE_ASYM)
1202                                         new_tg3_flags |=
1203                                                 (TG3_FLAG_RX_PAUSE);
1204                         } else {
1205                                 if (remote_adv & LPA_PAUSE_CAP)
1206                                         new_tg3_flags |=
1207                                                 (TG3_FLAG_RX_PAUSE |
1208                                                 TG3_FLAG_TX_PAUSE);
1209                         }
1210                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1211                         if ((remote_adv & LPA_PAUSE_CAP) &&
1212                         (remote_adv & LPA_PAUSE_ASYM))
1213                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1214                 }
1215
1216                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1217                 tp->tg3_flags |= new_tg3_flags;
1218         } else {
1219                 new_tg3_flags = tp->tg3_flags;
1220         }
1221
1222         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1223                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1224         else
1225                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1226
1227         if (old_rx_mode != tp->rx_mode) {
1228                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1229         }
1230
1231         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1232                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1233         else
1234                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1235
1236         if (old_tx_mode != tp->tx_mode) {
1237                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1238         }
1239 }
1240
1241 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1242 {
1243         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1244         case MII_TG3_AUX_STAT_10HALF:
1245                 *speed = SPEED_10;
1246                 *duplex = DUPLEX_HALF;
1247                 break;
1248
1249         case MII_TG3_AUX_STAT_10FULL:
1250                 *speed = SPEED_10;
1251                 *duplex = DUPLEX_FULL;
1252                 break;
1253
1254         case MII_TG3_AUX_STAT_100HALF:
1255                 *speed = SPEED_100;
1256                 *duplex = DUPLEX_HALF;
1257                 break;
1258
1259         case MII_TG3_AUX_STAT_100FULL:
1260                 *speed = SPEED_100;
1261                 *duplex = DUPLEX_FULL;
1262                 break;
1263
1264         case MII_TG3_AUX_STAT_1000HALF:
1265                 *speed = SPEED_1000;
1266                 *duplex = DUPLEX_HALF;
1267                 break;
1268
1269         case MII_TG3_AUX_STAT_1000FULL:
1270                 *speed = SPEED_1000;
1271                 *duplex = DUPLEX_FULL;
1272                 break;
1273
1274         default:
1275                 *speed = SPEED_INVALID;
1276                 *duplex = DUPLEX_INVALID;
1277                 break;
1278         };
1279 }
1280
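/* Program the copper PHY advertisement according to link_config: a
 * reduced set in low-power mode, the full set when autonegotiating with
 * no speed forced, or a forced speed/duplex written to BMCR when autoneg
 * is disabled (after waiting for the old link to drop).
 */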
1281 static void tg3_phy_copper_begin(struct tg3 *tp)
1282 {
1283         u32 new_adv;
1284         int i;
1285
1286         if (tp->link_config.phy_is_low_power) {
1287                 /* Entering low power mode.  Disable gigabit and
1288                  * 100baseT advertisements.
1289                  */
1290                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1291
1292                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1293                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1294                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1295                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1296
1297                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1298         } else if (tp->link_config.speed == SPEED_INVALID) {
1299                 tp->link_config.advertising =
1300                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1301                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1302                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1303                          ADVERTISED_Autoneg | ADVERTISED_MII);
1304
1305                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1306                         tp->link_config.advertising &=
1307                                 ~(ADVERTISED_1000baseT_Half |
1308                                   ADVERTISED_1000baseT_Full);
1309
1310                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1311                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1312                         new_adv |= ADVERTISE_10HALF;
1313                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1314                         new_adv |= ADVERTISE_10FULL;
1315                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1316                         new_adv |= ADVERTISE_100HALF;
1317                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1318                         new_adv |= ADVERTISE_100FULL;
1319                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1320
1321                 if (tp->link_config.advertising &
1322                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1323                         new_adv = 0;
1324                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1325                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1326                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1327                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1328                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1329                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1330                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1331                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1332                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1333                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1334                 } else {
1335                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1336                 }
1337         } else {
1338                 /* Asking for a specific link mode. */
1339                 if (tp->link_config.speed == SPEED_1000) {
1340                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1341                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1342
1343                         if (tp->link_config.duplex == DUPLEX_FULL)
1344                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1345                         else
1346                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1347                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1348                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1349                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1350                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1351                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1352                 } else {
1353                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1354
1355                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1356                         if (tp->link_config.speed == SPEED_100) {
1357                                 if (tp->link_config.duplex == DUPLEX_FULL)
1358                                         new_adv |= ADVERTISE_100FULL;
1359                                 else
1360                                         new_adv |= ADVERTISE_100HALF;
1361                         } else {
1362                                 if (tp->link_config.duplex == DUPLEX_FULL)
1363                                         new_adv |= ADVERTISE_10FULL;
1364                                 else
1365                                         new_adv |= ADVERTISE_10HALF;
1366                         }
1367                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1368                 }
1369         }
1370
1371         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1372             tp->link_config.speed != SPEED_INVALID) {
1373                 u32 bmcr, orig_bmcr;
1374
1375                 tp->link_config.active_speed = tp->link_config.speed;
1376                 tp->link_config.active_duplex = tp->link_config.duplex;
1377
1378                 bmcr = 0;
1379                 switch (tp->link_config.speed) {
1380                 default:
1381                 case SPEED_10:
1382                         break;
1383
1384                 case SPEED_100:
1385                         bmcr |= BMCR_SPEED100;
1386                         break;
1387
1388                 case SPEED_1000:
1389                         bmcr |= TG3_BMCR_SPEED1000;
1390                         break;
1391                 }
1392
1393                 if (tp->link_config.duplex == DUPLEX_FULL)
1394                         bmcr |= BMCR_FULLDPLX;
1395
1396                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1397                     (bmcr != orig_bmcr)) {
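                              /* Take the link down first (loopback drops it),
                               * apparently so the forced speed/duplex written
                               * below takes effect from a clean link-down state.
                               */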
1398                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1399                         for (i = 0; i < 1500; i++) {
1400                                 u32 tmp;
1401
1402                                 udelay(10);
1403                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1404                                     tg3_readphy(tp, MII_BMSR, &tmp))
1405                                         continue;
1406                                 if (!(tmp & BMSR_LSTATUS)) {
1407                                         udelay(40);
1408                                         break;
1409                                 }
1410                         }
1411                         tg3_writephy(tp, MII_BMCR, bmcr);
1412                         udelay(40);
1413                 }
1414         } else {
1415                 tg3_writephy(tp, MII_BMCR,
1416                              BMCR_ANENABLE | BMCR_ANRESTART);
1417         }
1418 }
1419
1420 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1421 {
1422         int err;
1423
1424         /* Turn off tap power management. */
1425         /* Set Extended packet length bit */
1426         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1427
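              /* Each DSP register is programmed indirectly: write the register
               * address to MII_TG3_DSP_ADDRESS, then the value to
               * MII_TG3_DSP_RW_PORT.
               */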
1428         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1429         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1430
1431         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1432         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1433
1434         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1435         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1436
1437         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1438         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1439
1440         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1441         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1442
1443         udelay(40);
1444
1445         return err;
1446 }
1447
1448 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1449 {
1450         u32 adv_reg, all_mask;
1451
1452         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1453                 return 0;
1454
1455         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1456                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1457         if ((adv_reg & all_mask) != all_mask)
1458                 return 0;
1459         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1460                 u32 tg3_ctrl;
1461
1462                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1463                         return 0;
1464
1465                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1466                             MII_TG3_CTRL_ADV_1000_FULL);
1467                 if ((tg3_ctrl & all_mask) != all_mask)
1468                         return 0;
1469         }
1470         return 1;
1471 }
1472
1473 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1474 {
1475         int current_link_up;
1476         u32 bmsr, dummy;
1477         u16 current_speed;
1478         u8 current_duplex;
1479         int i, err;
1480
1481         tw32(MAC_EVENT, 0);
1482
1483         tw32_f(MAC_STATUS,
1484              (MAC_STATUS_SYNC_CHANGED |
1485               MAC_STATUS_CFG_CHANGED |
1486               MAC_STATUS_MI_COMPLETION |
1487               MAC_STATUS_LNKSTATE_CHANGED));
1488         udelay(40);
1489
1490         tp->mi_mode = MAC_MI_MODE_BASE;
1491         tw32_f(MAC_MI_MODE, tp->mi_mode);
1492         udelay(80);
1493
1494         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1495
1496         /* Some third-party PHYs need to be reset on link going
1497          * down.
1498          */
1499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1500              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1501              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1502             netif_carrier_ok(tp->dev)) {
1503                 tg3_readphy(tp, MII_BMSR, &bmsr);
1504                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1505                     !(bmsr & BMSR_LSTATUS))
1506                         force_reset = 1;
1507         }
1508         if (force_reset)
1509                 tg3_phy_reset(tp);
1510
1511         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1512                 tg3_readphy(tp, MII_BMSR, &bmsr);
1513                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1514                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1515                         bmsr = 0;
1516
1517                 if (!(bmsr & BMSR_LSTATUS)) {
1518                         err = tg3_init_5401phy_dsp(tp);
1519                         if (err)
1520                                 return err;
1521
1522                         tg3_readphy(tp, MII_BMSR, &bmsr);
1523                         for (i = 0; i < 1000; i++) {
1524                                 udelay(10);
1525                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1526                                     (bmsr & BMSR_LSTATUS)) {
1527                                         udelay(40);
1528                                         break;
1529                                 }
1530                         }
1531
1532                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1533                             !(bmsr & BMSR_LSTATUS) &&
1534                             tp->link_config.active_speed == SPEED_1000) {
1535                                 err = tg3_phy_reset(tp);
1536                                 if (!err)
1537                                         err = tg3_init_5401phy_dsp(tp);
1538                                 if (err)
1539                                         return err;
1540                         }
1541                 }
1542         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1544                 /* 5701 {A0,B0} CRC bug workaround */
1545                 tg3_writephy(tp, 0x15, 0x0a75);
1546                 tg3_writephy(tp, 0x1c, 0x8c68);
1547                 tg3_writephy(tp, 0x1c, 0x8d68);
1548                 tg3_writephy(tp, 0x1c, 0x8c68);
1549         }
1550
1551         /* Clear pending interrupts... */
1552         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1553         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1554
1555         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1556                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1557         else
1558                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1559
1560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1562                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1563                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1564                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1565                 else
1566                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1567         }
1568
1569         current_link_up = 0;
1570         current_speed = SPEED_INVALID;
1571         current_duplex = DUPLEX_INVALID;
1572
1573         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1574                 u32 val;
1575
1576                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1577                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1578                 if (!(val & (1 << 10))) {
1579                         val |= (1 << 10);
1580                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1581                         goto relink;
1582                 }
1583         }
1584
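              /* BMSR latches link-down events, so read it twice; the second
               * read reflects the current link state.
               */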
1585         bmsr = 0;
1586         for (i = 0; i < 100; i++) {
1587                 tg3_readphy(tp, MII_BMSR, &bmsr);
1588                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1589                     (bmsr & BMSR_LSTATUS))
1590                         break;
1591                 udelay(40);
1592         }
1593
1594         if (bmsr & BMSR_LSTATUS) {
1595                 u32 aux_stat, bmcr;
1596
1597                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1598                 for (i = 0; i < 2000; i++) {
1599                         udelay(10);
1600                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1601                             aux_stat)
1602                                 break;
1603                 }
1604
1605                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1606                                              &current_speed,
1607                                              &current_duplex);
1608
1609                 bmcr = 0;
1610                 for (i = 0; i < 200; i++) {
1611                         tg3_readphy(tp, MII_BMCR, &bmcr);
1612                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1613                                 continue;
1614                         if (bmcr && bmcr != 0x7fff)
1615                                 break;
1616                         udelay(10);
1617                 }
1618
1619                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1620                         if (bmcr & BMCR_ANENABLE) {
1621                                 current_link_up = 1;
1622
1623                                 /* Force autoneg restart if we are exiting
1624                                  * low power mode.
1625                                  */
1626                                 if (!tg3_copper_is_advertising_all(tp))
1627                                         current_link_up = 0;
1628                         } else {
1629                                 current_link_up = 0;
1630                         }
1631                 } else {
1632                         if (!(bmcr & BMCR_ANENABLE) &&
1633                             tp->link_config.speed == current_speed &&
1634                             tp->link_config.duplex == current_duplex) {
1635                                 current_link_up = 1;
1636                         } else {
1637                                 current_link_up = 0;
1638                         }
1639                 }
1640
1641                 tp->link_config.active_speed = current_speed;
1642                 tp->link_config.active_duplex = current_duplex;
1643         }
1644
1645         if (current_link_up == 1 &&
1646             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1647             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1648                 u32 local_adv, remote_adv;
1649
1650                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1651                         local_adv = 0;
1652                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1653
1654                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1655                         remote_adv = 0;
1656
1657                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1658
1659                 /* If we are not advertising full pause capability,
1660                  * something is wrong.  Bring the link down and reconfigure.
1661                  */
1662                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1663                         current_link_up = 0;
1664                 } else {
1665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1666                 }
1667         }
1668 relink:
1669         if (current_link_up == 0) {
1670                 u32 tmp;
1671
1672                 tg3_phy_copper_begin(tp);
1673
1674                 tg3_readphy(tp, MII_BMSR, &tmp);
1675                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1676                     (tmp & BMSR_LSTATUS))
1677                         current_link_up = 1;
1678         }
1679
1680         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1681         if (current_link_up == 1) {
1682                 if (tp->link_config.active_speed == SPEED_100 ||
1683                     tp->link_config.active_speed == SPEED_10)
1684                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1685                 else
1686                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1687         } else
1688                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1689
1690         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1691         if (tp->link_config.active_duplex == DUPLEX_HALF)
1692                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1693
1694         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1696                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1697                     (current_link_up == 1 &&
1698                      tp->link_config.active_speed == SPEED_10))
1699                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1700         } else {
1701                 if (current_link_up == 1)
1702                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1703         }
1704
1705         /* ??? Without this setting Netgear GA302T PHY does not
1706          * ??? send/receive packets...
1707          */
1708         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1709             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1710                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1711                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1712                 udelay(80);
1713         }
1714
1715         tw32_f(MAC_MODE, tp->mac_mode);
1716         udelay(40);
1717
1718         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1719                 /* Polled via timer. */
1720                 tw32_f(MAC_EVENT, 0);
1721         } else {
1722                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1723         }
1724         udelay(40);
1725
1726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1727             current_link_up == 1 &&
1728             tp->link_config.active_speed == SPEED_1000 &&
1729             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1730              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1731                 udelay(120);
1732                 tw32_f(MAC_STATUS,
1733                      (MAC_STATUS_SYNC_CHANGED |
1734                       MAC_STATUS_CFG_CHANGED));
1735                 udelay(40);
1736                 tg3_write_mem(tp,
1737                               NIC_SRAM_FIRMWARE_MBOX,
1738                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1739         }
1740
1741         if (current_link_up != netif_carrier_ok(tp->dev)) {
1742                 if (current_link_up)
1743                         netif_carrier_on(tp->dev);
1744                 else
1745                         netif_carrier_off(tp->dev);
1746                 tg3_link_report(tp);
1747         }
1748
1749         return 0;
1750 }
1751
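     /* State for the software 1000BASE-X autonegotiation state machine
      * (in the spirit of IEEE 802.3 clause 37), driven from fiber_autoneg()
      * below when hardware autoneg is not used.
      */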
1752 struct tg3_fiber_aneginfo {
1753         int state;
1754 #define ANEG_STATE_UNKNOWN              0
1755 #define ANEG_STATE_AN_ENABLE            1
1756 #define ANEG_STATE_RESTART_INIT         2
1757 #define ANEG_STATE_RESTART              3
1758 #define ANEG_STATE_DISABLE_LINK_OK      4
1759 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1760 #define ANEG_STATE_ABILITY_DETECT       6
1761 #define ANEG_STATE_ACK_DETECT_INIT      7
1762 #define ANEG_STATE_ACK_DETECT           8
1763 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1764 #define ANEG_STATE_COMPLETE_ACK         10
1765 #define ANEG_STATE_IDLE_DETECT_INIT     11
1766 #define ANEG_STATE_IDLE_DETECT          12
1767 #define ANEG_STATE_LINK_OK              13
1768 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1769 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1770
1771         u32 flags;
1772 #define MR_AN_ENABLE            0x00000001
1773 #define MR_RESTART_AN           0x00000002
1774 #define MR_AN_COMPLETE          0x00000004
1775 #define MR_PAGE_RX              0x00000008
1776 #define MR_NP_LOADED            0x00000010
1777 #define MR_TOGGLE_TX            0x00000020
1778 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1779 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1780 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1781 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1782 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1783 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1784 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1785 #define MR_TOGGLE_RX            0x00002000
1786 #define MR_NP_RX                0x00004000
1787
1788 #define MR_LINK_OK              0x80000000
1789
1790         unsigned long link_time, cur_time;
1791
1792         u32 ability_match_cfg;
1793         int ability_match_count;
1794
1795         char ability_match, idle_match, ack_match;
1796
1797         u32 txconfig, rxconfig;
1798 #define ANEG_CFG_NP             0x00000080
1799 #define ANEG_CFG_ACK            0x00000040
1800 #define ANEG_CFG_RF2            0x00000020
1801 #define ANEG_CFG_RF1            0x00000010
1802 #define ANEG_CFG_PS2            0x00000001
1803 #define ANEG_CFG_PS1            0x00008000
1804 #define ANEG_CFG_HD             0x00004000
1805 #define ANEG_CFG_FD             0x00002000
1806 #define ANEG_CFG_INVAL          0x00001f06
1807
1808 };
1809 #define ANEG_OK         0
1810 #define ANEG_DONE       1
1811 #define ANEG_TIMER_ENAB 2
1812 #define ANEG_FAILED     -1
1813
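     /* Settle time in state machine ticks; fiber_autoneg() steps the machine
      * roughly once per microsecond, so this is on the order of 10 ms.
      */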
1814 #define ANEG_STATE_SETTLE_TIME  10000
1815
1816 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1817                                    struct tg3_fiber_aneginfo *ap)
1818 {
1819         unsigned long delta;
1820         u32 rx_cfg_reg;
1821         int ret;
1822
1823         if (ap->state == ANEG_STATE_UNKNOWN) {
1824                 ap->rxconfig = 0;
1825                 ap->link_time = 0;
1826                 ap->cur_time = 0;
1827                 ap->ability_match_cfg = 0;
1828                 ap->ability_match_count = 0;
1829                 ap->ability_match = 0;
1830                 ap->idle_match = 0;
1831                 ap->ack_match = 0;
1832         }
1833         ap->cur_time++;
1834
1835         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1836                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1837
1838                 if (rx_cfg_reg != ap->ability_match_cfg) {
1839                         ap->ability_match_cfg = rx_cfg_reg;
1840                         ap->ability_match = 0;
1841                         ap->ability_match_count = 0;
1842                 } else {
1843                         if (++ap->ability_match_count > 1) {
1844                                 ap->ability_match = 1;
1845                                 ap->ability_match_cfg = rx_cfg_reg;
1846                         }
1847                 }
1848                 if (rx_cfg_reg & ANEG_CFG_ACK)
1849                         ap->ack_match = 1;
1850                 else
1851                         ap->ack_match = 0;
1852
1853                 ap->idle_match = 0;
1854         } else {
1855                 ap->idle_match = 1;
1856                 ap->ability_match_cfg = 0;
1857                 ap->ability_match_count = 0;
1858                 ap->ability_match = 0;
1859                 ap->ack_match = 0;
1860
1861                 rx_cfg_reg = 0;
1862         }
1863
1864         ap->rxconfig = rx_cfg_reg;
1865         ret = ANEG_OK;
1866
1867         switch (ap->state) {
1868         case ANEG_STATE_UNKNOWN:
1869                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1870                         ap->state = ANEG_STATE_AN_ENABLE;
1871
1872                 /* fallthru */
1873         case ANEG_STATE_AN_ENABLE:
1874                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1875                 if (ap->flags & MR_AN_ENABLE) {
1876                         ap->link_time = 0;
1877                         ap->cur_time = 0;
1878                         ap->ability_match_cfg = 0;
1879                         ap->ability_match_count = 0;
1880                         ap->ability_match = 0;
1881                         ap->idle_match = 0;
1882                         ap->ack_match = 0;
1883
1884                         ap->state = ANEG_STATE_RESTART_INIT;
1885                 } else {
1886                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1887                 }
1888                 break;
1889
1890         case ANEG_STATE_RESTART_INIT:
1891                 ap->link_time = ap->cur_time;
1892                 ap->flags &= ~(MR_NP_LOADED);
1893                 ap->txconfig = 0;
1894                 tw32(MAC_TX_AUTO_NEG, 0);
1895                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1896                 tw32_f(MAC_MODE, tp->mac_mode);
1897                 udelay(40);
1898
1899                 ret = ANEG_TIMER_ENAB;
1900                 ap->state = ANEG_STATE_RESTART;
1901
1902                 /* fallthru */
1903         case ANEG_STATE_RESTART:
1904                 delta = ap->cur_time - ap->link_time;
1905                 if (delta > ANEG_STATE_SETTLE_TIME) {
1906                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1907                 } else {
1908                         ret = ANEG_TIMER_ENAB;
1909                 }
1910                 break;
1911
1912         case ANEG_STATE_DISABLE_LINK_OK:
1913                 ret = ANEG_DONE;
1914                 break;
1915
1916         case ANEG_STATE_ABILITY_DETECT_INIT:
1917                 ap->flags &= ~(MR_TOGGLE_TX);
1918                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1919                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1920                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1921                 tw32_f(MAC_MODE, tp->mac_mode);
1922                 udelay(40);
1923
1924                 ap->state = ANEG_STATE_ABILITY_DETECT;
1925                 break;
1926
1927         case ANEG_STATE_ABILITY_DETECT:
1928                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1929                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1930                 }
1931                 break;
1932
1933         case ANEG_STATE_ACK_DETECT_INIT:
1934                 ap->txconfig |= ANEG_CFG_ACK;
1935                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1936                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1937                 tw32_f(MAC_MODE, tp->mac_mode);
1938                 udelay(40);
1939
1940                 ap->state = ANEG_STATE_ACK_DETECT;
1941
1942                 /* fallthru */
1943         case ANEG_STATE_ACK_DETECT:
1944                 if (ap->ack_match != 0) {
1945                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1946                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1947                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1948                         } else {
1949                                 ap->state = ANEG_STATE_AN_ENABLE;
1950                         }
1951                 } else if (ap->ability_match != 0 &&
1952                            ap->rxconfig == 0) {
1953                         ap->state = ANEG_STATE_AN_ENABLE;
1954                 }
1955                 break;
1956
1957         case ANEG_STATE_COMPLETE_ACK_INIT:
1958                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1959                         ret = ANEG_FAILED;
1960                         break;
1961                 }
1962                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1963                                MR_LP_ADV_HALF_DUPLEX |
1964                                MR_LP_ADV_SYM_PAUSE |
1965                                MR_LP_ADV_ASYM_PAUSE |
1966                                MR_LP_ADV_REMOTE_FAULT1 |
1967                                MR_LP_ADV_REMOTE_FAULT2 |
1968                                MR_LP_ADV_NEXT_PAGE |
1969                                MR_TOGGLE_RX |
1970                                MR_NP_RX);
1971                 if (ap->rxconfig & ANEG_CFG_FD)
1972                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1973                 if (ap->rxconfig & ANEG_CFG_HD)
1974                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1975                 if (ap->rxconfig & ANEG_CFG_PS1)
1976                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1977                 if (ap->rxconfig & ANEG_CFG_PS2)
1978                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1979                 if (ap->rxconfig & ANEG_CFG_RF1)
1980                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1981                 if (ap->rxconfig & ANEG_CFG_RF2)
1982                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1983                 if (ap->rxconfig & ANEG_CFG_NP)
1984                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1985
1986                 ap->link_time = ap->cur_time;
1987
1988                 ap->flags ^= (MR_TOGGLE_TX);
1989                 if (ap->rxconfig & 0x0008)
1990                         ap->flags |= MR_TOGGLE_RX;
1991                 if (ap->rxconfig & ANEG_CFG_NP)
1992                         ap->flags |= MR_NP_RX;
1993                 ap->flags |= MR_PAGE_RX;
1994
1995                 ap->state = ANEG_STATE_COMPLETE_ACK;
1996                 ret = ANEG_TIMER_ENAB;
1997                 break;
1998
1999         case ANEG_STATE_COMPLETE_ACK:
2000                 if (ap->ability_match != 0 &&
2001                     ap->rxconfig == 0) {
2002                         ap->state = ANEG_STATE_AN_ENABLE;
2003                         break;
2004                 }
2005                 delta = ap->cur_time - ap->link_time;
2006                 if (delta > ANEG_STATE_SETTLE_TIME) {
2007                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2008                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2009                         } else {
2010                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2011                                     !(ap->flags & MR_NP_RX)) {
2012                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2013                                 } else {
2014                                         ret = ANEG_FAILED;
2015                                 }
2016                         }
2017                 }
2018                 break;
2019
2020         case ANEG_STATE_IDLE_DETECT_INIT:
2021                 ap->link_time = ap->cur_time;
2022                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2023                 tw32_f(MAC_MODE, tp->mac_mode);
2024                 udelay(40);
2025
2026                 ap->state = ANEG_STATE_IDLE_DETECT;
2027                 ret = ANEG_TIMER_ENAB;
2028                 break;
2029
2030         case ANEG_STATE_IDLE_DETECT:
2031                 if (ap->ability_match != 0 &&
2032                     ap->rxconfig == 0) {
2033                         ap->state = ANEG_STATE_AN_ENABLE;
2034                         break;
2035                 }
2036                 delta = ap->cur_time - ap->link_time;
2037                 if (delta > ANEG_STATE_SETTLE_TIME) {
2038                         /* XXX another gem from the Broadcom driver :( */
2039                         ap->state = ANEG_STATE_LINK_OK;
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_LINK_OK:
2044                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2045                 ret = ANEG_DONE;
2046                 break;
2047
2048         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2049                 /* ??? unimplemented */
2050                 break;
2051
2052         case ANEG_STATE_NEXT_PAGE_WAIT:
2053                 /* ??? unimplemented */
2054                 break;
2055
2056         default:
2057                 ret = ANEG_FAILED;
2058                 break;
2059         }
2060
2061         return ret;
2062 }
2063
2064 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2065 {
2066         int res = 0;
2067         struct tg3_fiber_aneginfo aninfo;
2068         int status = ANEG_FAILED;
2069         unsigned int tick;
2070         u32 tmp;
2071
2072         tw32_f(MAC_TX_AUTO_NEG, 0);
2073
2074         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2075         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2076         udelay(40);
2077
2078         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2079         udelay(40);
2080
2081         memset(&aninfo, 0, sizeof(aninfo));
2082         aninfo.flags |= MR_AN_ENABLE;
2083         aninfo.state = ANEG_STATE_UNKNOWN;
2084         aninfo.cur_time = 0;
2085         tick = 0;
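              /* Step the state machine until it completes or fails, for at
               * most 195000 ticks (at least ~195 ms given the udelay(1) per
               * iteration).
               */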
2086         while (++tick < 195000) {
2087                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2088                 if (status == ANEG_DONE || status == ANEG_FAILED)
2089                         break;
2090
2091                 udelay(1);
2092         }
2093
2094         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2095         tw32_f(MAC_MODE, tp->mac_mode);
2096         udelay(40);
2097
2098         *flags = aninfo.flags;
2099
2100         if (status == ANEG_DONE &&
2101             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2102                              MR_LP_ADV_FULL_DUPLEX)))
2103                 res = 1;
2104
2105         return res;
2106 }
2107
2108 static void tg3_init_bcm8002(struct tg3 *tp)
2109 {
2110         u32 mac_status = tr32(MAC_STATUS);
2111         int i;
2112
2113         /* Reset when initializing for the first time or when we have a link. */
2114         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2115             !(mac_status & MAC_STATUS_PCS_SYNCED))
2116                 return;
2117
2118         /* Set PLL lock range. */
2119         tg3_writephy(tp, 0x16, 0x8007);
2120
2121         /* SW reset */
2122         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2123
2124         /* Wait for reset to complete. */
2125         /* XXX schedule_timeout() ... */
2126         for (i = 0; i < 500; i++)
2127                 udelay(10);
2128
2129         /* Config mode; select PMA/Ch 1 regs. */
2130         tg3_writephy(tp, 0x10, 0x8411);
2131
2132         /* Enable auto-lock and comdet, select txclk for tx. */
2133         tg3_writephy(tp, 0x11, 0x0a10);
2134
2135         tg3_writephy(tp, 0x18, 0x00a0);
2136         tg3_writephy(tp, 0x16, 0x41ff);
2137
2138         /* Assert and deassert POR. */
2139         tg3_writephy(tp, 0x13, 0x0400);
2140         udelay(40);
2141         tg3_writephy(tp, 0x13, 0x0000);
2142
2143         tg3_writephy(tp, 0x11, 0x0a50);
2144         udelay(40);
2145         tg3_writephy(tp, 0x11, 0x0a10);
2146
2147         /* Wait for signal to stabilize */
2148         /* XXX schedule_timeout() ... */
2149         for (i = 0; i < 15000; i++)
2150                 udelay(10);
2151
2152         /* Deselect the channel register so we can read the PHYID
2153          * later.
2154          */
2155         tg3_writephy(tp, 0x10, 0x8011);
2156 }
2157
2158 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2159 {
2160         u32 sg_dig_ctrl, sg_dig_status;
2161         u32 serdes_cfg, expected_sg_dig_ctrl;
2162         int workaround, port_a;
2163         int current_link_up;
2164
2165         serdes_cfg = 0;
2166         expected_sg_dig_ctrl = 0;
2167         workaround = 0;
2168         port_a = 1;
2169         current_link_up = 0;
2170
2171         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2172             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2173                 workaround = 1;
2174                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2175                         port_a = 0;
2176
2177                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2178                 /* preserve bits 20-23 for voltage regulator */
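                      /* i.e. 0x00f06fff = bits 0-11 (0x00000fff) |
                       *                   bits 13-14 (0x00006000) |
                       *                   bits 20-23 (0x00f00000)
                       */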
2179                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2180         }
2181
2182         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2183
2184         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2185                 if (sg_dig_ctrl & (1 << 31)) {
2186                         if (workaround) {
2187                                 u32 val = serdes_cfg;
2188
2189                                 if (port_a)
2190                                         val |= 0xc010000;
2191                                 else
2192                                         val |= 0x4010000;
2193                                 tw32_f(MAC_SERDES_CFG, val);
2194                         }
2195                         tw32_f(SG_DIG_CTRL, 0x01388400);
2196                 }
2197                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2198                         tg3_setup_flow_control(tp, 0, 0);
2199                         current_link_up = 1;
2200                 }
2201                 goto out;
2202         }
2203
2204         /* Want auto-negotiation.  */
2205         expected_sg_dig_ctrl = 0x81388400;
2206
2207         /* Pause capability */
2208         expected_sg_dig_ctrl |= (1 << 11);
2209
2210         /* Asymmetric pause */
2211         expected_sg_dig_ctrl |= (1 << 12);
2212
2213         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2214                 if (workaround)
2215                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2216                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2217                 udelay(5);
2218                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2219
2220                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2221         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2222                                  MAC_STATUS_SIGNAL_DET)) {
2223                 int i;
2224
2225                 /* Give it time to negotiate (~200ms) */
2226                 for (i = 0; i < 40000; i++) {
2227                         sg_dig_status = tr32(SG_DIG_STATUS);
2228                         if (sg_dig_status & (0x3))
2229                                 break;
2230                         udelay(5);
2231                 }
2232                 mac_status = tr32(MAC_STATUS);
2233
2234                 if ((sg_dig_status & (1 << 1)) &&
2235                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2236                         u32 local_adv, remote_adv;
2237
2238                         local_adv = ADVERTISE_PAUSE_CAP;
2239                         remote_adv = 0;
2240                         if (sg_dig_status & (1 << 19))
2241                                 remote_adv |= LPA_PAUSE_CAP;
2242                         if (sg_dig_status & (1 << 20))
2243                                 remote_adv |= LPA_PAUSE_ASYM;
2244
2245                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2246                         current_link_up = 1;
2247                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2248                 } else if (!(sg_dig_status & (1 << 1))) {
2249                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2250                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2251                         else {
2252                                 if (workaround) {
2253                                         u32 val = serdes_cfg;
2254
2255                                         if (port_a)
2256                                                 val |= 0xc010000;
2257                                         else
2258                                                 val |= 0x4010000;
2259
2260                                         tw32_f(MAC_SERDES_CFG, val);
2261                                 }
2262
2263                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2264                                 udelay(40);
2265
2266                                 /* Link parallel detection - link is up
2267                                  * only if we have PCS_SYNC and not
2268                                  * receiving config code words. */
2269                                 mac_status = tr32(MAC_STATUS);
2270                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2271                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2272                                         tg3_setup_flow_control(tp, 0, 0);
2273                                         current_link_up = 1;
2274                                 }
2275                         }
2276                 }
2277         }
2278
2279 out:
2280         return current_link_up;
2281 }
2282
2283 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2284 {
2285         int current_link_up = 0;
2286
2287         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2288                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2289                 goto out;
2290         }
2291
2292         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2293                 u32 flags;
2294                 int i;
2295   
2296                 if (fiber_autoneg(tp, &flags)) {
2297                         u32 local_adv, remote_adv;
2298
2299                         local_adv = ADVERTISE_PAUSE_CAP;
2300                         remote_adv = 0;
2301                         if (flags & MR_LP_ADV_SYM_PAUSE)
2302                                 remote_adv |= LPA_PAUSE_CAP;
2303                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2304                                 remote_adv |= LPA_PAUSE_ASYM;
2305
2306                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2307
2308                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2309                         current_link_up = 1;
2310                 }
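                      /* Ack the sync/config-changed attentions and give them
                       * a chance to stop reasserting before we sample
                       * MAC_STATUS below.
                       */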
2311                 for (i = 0; i < 30; i++) {
2312                         udelay(20);
2313                         tw32_f(MAC_STATUS,
2314                                (MAC_STATUS_SYNC_CHANGED |
2315                                 MAC_STATUS_CFG_CHANGED));
2316                         udelay(40);
2317                         if ((tr32(MAC_STATUS) &
2318                              (MAC_STATUS_SYNC_CHANGED |
2319                               MAC_STATUS_CFG_CHANGED)) == 0)
2320                                 break;
2321                 }
2322
2323                 mac_status = tr32(MAC_STATUS);
2324                 if (current_link_up == 0 &&
2325                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2326                     !(mac_status & MAC_STATUS_RCVD_CFG))
2327                         current_link_up = 1;
2328         } else {
2329                 /* Forcing 1000FD link up. */
2330                 current_link_up = 1;
2331                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2332
2333                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2334                 udelay(40);
2335         }
2336
2337 out:
2338         return current_link_up;
2339 }
2340
2341 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2342 {
2343         u32 orig_pause_cfg;
2344         u16 orig_active_speed;
2345         u8 orig_active_duplex;
2346         u32 mac_status;
2347         int current_link_up;
2348         int i;
2349
2350         orig_pause_cfg =
2351                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2352                                   TG3_FLAG_TX_PAUSE));
2353         orig_active_speed = tp->link_config.active_speed;
2354         orig_active_duplex = tp->link_config.active_duplex;
2355
2356         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2357             netif_carrier_ok(tp->dev) &&
2358             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2359                 mac_status = tr32(MAC_STATUS);
2360                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2361                                MAC_STATUS_SIGNAL_DET |
2362                                MAC_STATUS_CFG_CHANGED |
2363                                MAC_STATUS_RCVD_CFG);
2364                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2365                                    MAC_STATUS_SIGNAL_DET)) {
2366                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2367                                             MAC_STATUS_CFG_CHANGED));
2368                         return 0;
2369                 }
2370         }
2371
2372         tw32_f(MAC_TX_AUTO_NEG, 0);
2373
2374         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2375         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2376         tw32_f(MAC_MODE, tp->mac_mode);
2377         udelay(40);
2378
2379         if (tp->phy_id == PHY_ID_BCM8002)
2380                 tg3_init_bcm8002(tp);
2381
2382         /* Enable link change event even when serdes polling.  */
2383         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2384         udelay(40);
2385
2386         current_link_up = 0;
2387         mac_status = tr32(MAC_STATUS);
2388
2389         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2390                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2391         else
2392                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2393
2394         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2395         tw32_f(MAC_MODE, tp->mac_mode);
2396         udelay(40);
2397
2398         tp->hw_status->status =
2399                 (SD_STATUS_UPDATED |
2400                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2401
2402         for (i = 0; i < 100; i++) {
2403                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2404                                     MAC_STATUS_CFG_CHANGED));
2405                 udelay(5);
2406                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2407                                          MAC_STATUS_CFG_CHANGED)) == 0)
2408                         break;
2409         }
2410
2411         mac_status = tr32(MAC_STATUS);
2412         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2413                 current_link_up = 0;
2414                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2415                         tw32_f(MAC_MODE, (tp->mac_mode |
2416                                           MAC_MODE_SEND_CONFIGS));
2417                         udelay(1);
2418                         tw32_f(MAC_MODE, tp->mac_mode);
2419                 }
2420         }
2421
2422         if (current_link_up == 1) {
2423                 tp->link_config.active_speed = SPEED_1000;
2424                 tp->link_config.active_duplex = DUPLEX_FULL;
2425                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2426                                     LED_CTRL_LNKLED_OVERRIDE |
2427                                     LED_CTRL_1000MBPS_ON));
2428         } else {
2429                 tp->link_config.active_speed = SPEED_INVALID;
2430                 tp->link_config.active_duplex = DUPLEX_INVALID;
2431                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2432                                     LED_CTRL_LNKLED_OVERRIDE |
2433                                     LED_CTRL_TRAFFIC_OVERRIDE));
2434         }
2435
2436         if (current_link_up != netif_carrier_ok(tp->dev)) {
2437                 if (current_link_up)
2438                         netif_carrier_on(tp->dev);
2439                 else
2440                         netif_carrier_off(tp->dev);
2441                 tg3_link_report(tp);
2442         } else {
2443                 u32 now_pause_cfg =
2444                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2445                                          TG3_FLAG_TX_PAUSE);
2446                 if (orig_pause_cfg != now_pause_cfg ||
2447                     orig_active_speed != tp->link_config.active_speed ||
2448                     orig_active_duplex != tp->link_config.active_duplex)
2449                         tg3_link_report(tp);
2450         }
2451
2452         return 0;
2453 }
2454
2455 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2456 {
2457         int err;
2458
2459         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2460                 err = tg3_setup_fiber_phy(tp, force_reset);
2461         } else {
2462                 err = tg3_setup_copper_phy(tp, force_reset);
2463         }
2464
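              /* 1000 Mb/s half duplex uses an extended slot time (carrier
               * extension), hence the larger slot time programmed below.
               */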
2465         if (tp->link_config.active_speed == SPEED_1000 &&
2466             tp->link_config.active_duplex == DUPLEX_HALF)
2467                 tw32(MAC_TX_LENGTHS,
2468                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2469                       (6 << TX_LENGTHS_IPG_SHIFT) |
2470                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2471         else
2472                 tw32(MAC_TX_LENGTHS,
2473                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2474                       (6 << TX_LENGTHS_IPG_SHIFT) |
2475                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2476
2477         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2478                 if (netif_carrier_ok(tp->dev)) {
2479                         tw32(HOSTCC_STAT_COAL_TICKS,
2480                              DEFAULT_STAT_COAL_TICKS);
2481                 } else {
2482                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2483                 }
2484         }
2485
2486         return err;
2487 }
2488
2489 /* Tigon3 never reports partial packet sends.  So we do not
2490  * need special logic to handle SKBs that have not had all
2491  * of their frags sent yet, like SunGEM does.
2492  */
2493 static void tg3_tx(struct tg3 *tp)
2494 {
2495         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2496         u32 sw_idx = tp->tx_cons;
2497
2498         while (sw_idx != hw_idx) {
2499                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2500                 struct sk_buff *skb = ri->skb;
2501                 int i;
2502
2503                 if (unlikely(skb == NULL))
2504                         BUG();
2505
2506                 pci_unmap_single(tp->pdev,
2507                                  pci_unmap_addr(ri, mapping),
2508                                  skb_headlen(skb),
2509                                  PCI_DMA_TODEVICE);
2510
2511                 ri->skb = NULL;
2512
2513                 sw_idx = NEXT_TX(sw_idx);
2514
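                      /* Unmap each fragment's DMA mapping; the skb itself is
                       * freed once the last fragment has been walked.
                       */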
2515                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2516                         if (unlikely(sw_idx == hw_idx))
2517                                 BUG();
2518
2519                         ri = &tp->tx_buffers[sw_idx];
2520                         if (unlikely(ri->skb != NULL))
2521                                 BUG();
2522
2523                         pci_unmap_page(tp->pdev,
2524                                        pci_unmap_addr(ri, mapping),
2525                                        skb_shinfo(skb)->frags[i].size,
2526                                        PCI_DMA_TODEVICE);
2527
2528                         sw_idx = NEXT_TX(sw_idx);
2529                 }
2530
2531                 dev_kfree_skb_irq(skb);
2532         }
2533
2534         tp->tx_cons = sw_idx;
2535
2536         if (netif_queue_stopped(tp->dev) &&
2537             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2538                 netif_wake_queue(tp->dev);
2539 }
2540
2541 /* Returns size of skb allocated or < 0 on error.
2542  *
2543  * We only need to fill in the address because the other members
2544  * of the RX descriptor are invariant, see tg3_init_rings.
2545  *
2546  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2547  * posting buffers we only dirty the first cache line of the RX
2548  * descriptor (containing the address).  Whereas for the RX status
2549  * buffers the cpu only reads the last cacheline of the RX descriptor
2550  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2551  */
2552 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2553                             int src_idx, u32 dest_idx_unmasked)
2554 {
2555         struct tg3_rx_buffer_desc *desc;
2556         struct ring_info *map, *src_map;
2557         struct sk_buff *skb;
2558         dma_addr_t mapping;
2559         int skb_size, dest_idx;
2560
2561         src_map = NULL;
2562         switch (opaque_key) {
2563         case RXD_OPAQUE_RING_STD:
2564                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2565                 desc = &tp->rx_std[dest_idx];
2566                 map = &tp->rx_std_buffers[dest_idx];
2567                 if (src_idx >= 0)
2568                         src_map = &tp->rx_std_buffers[src_idx];
2569                 skb_size = RX_PKT_BUF_SZ;
2570                 break;
2571
2572         case RXD_OPAQUE_RING_JUMBO:
2573                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2574                 desc = &tp->rx_jumbo[dest_idx];
2575                 map = &tp->rx_jumbo_buffers[dest_idx];
2576                 if (src_idx >= 0)
2577                         src_map = &tp->rx_jumbo_buffers[src_idx];
2578                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2579                 break;
2580
2581         default:
2582                 return -EINVAL;
2583         }
2584
2585         /* Do not overwrite any of the map or rp information
2586          * until we are sure we can commit to a new buffer.
2587          *
2588          * Callers depend upon this behavior and assume that
2589          * we leave everything unchanged if we fail.
2590          */
2591         skb = dev_alloc_skb(skb_size);
2592         if (skb == NULL)
2593                 return -ENOMEM;
2594
2595         skb->dev = tp->dev;
2596         skb_reserve(skb, tp->rx_offset);
2597
2598         mapping = pci_map_single(tp->pdev, skb->data,
2599                                  skb_size - tp->rx_offset,
2600                                  PCI_DMA_FROMDEVICE);
2601
2602         map->skb = skb;
2603         pci_unmap_addr_set(map, mapping, mapping);
2604
2605         if (src_map != NULL)
2606                 src_map->skb = NULL;
2607
2608         desc->addr_hi = ((u64)mapping >> 32);
2609         desc->addr_lo = ((u64)mapping & 0xffffffff);
2610
2611         return skb_size;
2612 }
2613
2614 /* We only need to move over in the address because the other
2615  * members of the RX descriptor are invariant.  See notes above
2616  * tg3_alloc_rx_skb for full details.
2617  */
2618 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2619                            int src_idx, u32 dest_idx_unmasked)
2620 {
2621         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2622         struct ring_info *src_map, *dest_map;
2623         int dest_idx;
2624
2625         switch (opaque_key) {
2626         case RXD_OPAQUE_RING_STD:
2627                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2628                 dest_desc = &tp->rx_std[dest_idx];
2629                 dest_map = &tp->rx_std_buffers[dest_idx];
2630                 src_desc = &tp->rx_std[src_idx];
2631                 src_map = &tp->rx_std_buffers[src_idx];
2632                 break;
2633
2634         case RXD_OPAQUE_RING_JUMBO:
2635                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2636                 dest_desc = &tp->rx_jumbo[dest_idx];
2637                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2638                 src_desc = &tp->rx_jumbo[src_idx];
2639                 src_map = &tp->rx_jumbo_buffers[src_idx];
2640                 break;
2641
2642         default:
2643                 return;
2644         }
2645
2646         dest_map->skb = src_map->skb;
2647         pci_unmap_addr_set(dest_map, mapping,
2648                            pci_unmap_addr(src_map, mapping));
2649         dest_desc->addr_hi = src_desc->addr_hi;
2650         dest_desc->addr_lo = src_desc->addr_lo;
2651
2652         src_map->skb = NULL;
2653 }
2654
2655 #if TG3_VLAN_TAG_USED
2656 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2657 {
2658         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2659 }
2660 #endif
2661
2662 /* The RX ring scheme is composed of multiple rings which post fresh
2663  * buffers to the chip, and one special ring the chip uses to report
2664  * status back to the host.
2665  *
2666  * The special ring reports the status of received packets to the
2667  * host.  The chip does not write into the original descriptor the
2668  * RX buffer was obtained from.  The chip simply takes the original
2669  * descriptor as provided by the host, updates the status and length
2670  * field, then writes this into the next status ring entry.
2671  *
2672  * Each ring the host uses to post buffers to the chip is described
2673  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2674  * it is first placed into the on-chip RAM.  When the packet's length
2675  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
2676  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2677  * whose MAXLEN covers the new packet's length is chosen.
2678  *
2679  * The "separate ring for rx status" scheme may sound queer, but it makes
2680  * sense from a cache coherency perspective.  If only the host writes
2681  * to the buffer post rings, and only the chip writes to the rx status
2682  * rings, then cache lines never move beyond shared-modified state.
2683  * If both the host and chip were to write into the same ring, cache line
2684  * eviction could occur since both entities want it in an exclusive state.
2685  */
2686 static int tg3_rx(struct tg3 *tp, int budget)
2687 {
2688         u32 work_mask;
2689         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2690         u16 hw_idx, sw_idx;
2691         int received;
2692
2693         hw_idx = tp->hw_status->idx[0].rx_producer;
2694         /*
2695          * We need to order the read of hw_idx and the read of
2696          * the opaque cookie.
2697          */
2698         rmb();
2699         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2700         work_mask = 0;
2701         received = 0;
2702         while (sw_idx != hw_idx && budget > 0) {
2703                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2704                 unsigned int len;
2705                 struct sk_buff *skb;
2706                 dma_addr_t dma_addr;
2707                 u32 opaque_key, desc_idx, *post_ptr;
2708
2709                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2710                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2711                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2712                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2713                                                   mapping);
2714                         skb = tp->rx_std_buffers[desc_idx].skb;
2715                         post_ptr = &tp->rx_std_ptr;
2716                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2717                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2718                                                   mapping);
2719                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2720                         post_ptr = &tp->rx_jumbo_ptr;
2721                 } else {
2723                         goto next_pkt_nopost;
2724                 }
2725
2726                 work_mask |= opaque_key;
2727
2728                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2729                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2730                 drop_it:
2731                         tg3_recycle_rx(tp, opaque_key,
2732                                        desc_idx, *post_ptr);
2733                 drop_it_no_recycle:
2734                         /* Other statistics are kept track of by the card. */
2735                         tp->net_stats.rx_dropped++;
2736                         goto next_pkt;
2737                 }
2738
2739                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2740
2741                 if (len > RX_COPY_THRESHOLD &&
2742                     tp->rx_offset == 2) {
2743                         /* rx_offset != 2 iff this is a 5701 card running
2744                          * in PCI-X mode [see tg3_get_invariants()]
2745                          */
2746                         int skb_size;
2747
2748                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2749                                                     desc_idx, *post_ptr);
2750                         if (skb_size < 0)
2751                                 goto drop_it;
2752
2753                         pci_unmap_single(tp->pdev, dma_addr,
2754                                          skb_size - tp->rx_offset,
2755                                          PCI_DMA_FROMDEVICE);
2756
2757                         skb_put(skb, len);
2758                 } else {
2759                         struct sk_buff *copy_skb;
2760
2761                         tg3_recycle_rx(tp, opaque_key,
2762                                        desc_idx, *post_ptr);
2763
2764                         copy_skb = dev_alloc_skb(len + 2);
2765                         if (copy_skb == NULL)
2766                                 goto drop_it_no_recycle;
2767
2768                         copy_skb->dev = tp->dev;
2769                         skb_reserve(copy_skb, 2);
2770                         skb_put(copy_skb, len);
2771                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2772                         memcpy(copy_skb->data, skb->data, len);
2773                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2774
2775                         /* We'll reuse the original ring buffer. */
2776                         skb = copy_skb;
2777                 }
2778
2779                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2780                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2781                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2782                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2783                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2784                 else
2785                         skb->ip_summed = CHECKSUM_NONE;
2786
2787                 skb->protocol = eth_type_trans(skb, tp->dev);
2788 #if TG3_VLAN_TAG_USED
2789                 if (tp->vlgrp != NULL &&
2790                     desc->type_flags & RXD_FLAG_VLAN) {
2791                         tg3_vlan_rx(tp, skb,
2792                                     desc->err_vlan & RXD_VLAN_MASK);
2793                 } else
2794 #endif
2795                         netif_receive_skb(skb);
2796
2797                 tp->dev->last_rx = jiffies;
2798                 received++;
2799                 budget--;
2800
2801 next_pkt:
2802                 (*post_ptr)++;
2803 next_pkt_nopost:
2804                 rx_rcb_ptr++;
2805                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2806         }
2807
2808         /* ACK the status ring. */
2809         tp->rx_rcb_ptr = rx_rcb_ptr;
2810         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2811                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2812
2813         /* Refill RX ring(s). */
2814         if (work_mask & RXD_OPAQUE_RING_STD) {
2815                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2816                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2817                              sw_idx);
2818         }
2819         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2820                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2821                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2822                              sw_idx);
2823         }
2824         mmiowb();
2825
2826         return received;
2827 }
2828
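/* NAPI poll callback.  Services link-change events (unless they are
 * handled via the link-change register or serdes polling), reaps
 * completed TX descriptors under tx_lock, and processes up to the
 * NAPI budget of RX packets.  Returns 0 and re-enables interrupts
 * once all work is done, or 1 to stay on the poll list.
 */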
2829 static int tg3_poll(struct net_device *netdev, int *budget)
2830 {
2831         struct tg3 *tp = netdev_priv(netdev);
2832         struct tg3_hw_status *sblk = tp->hw_status;
2833         unsigned long flags;
2834         int done;
2835
2836         spin_lock_irqsave(&tp->lock, flags);
2837
2838         /* handle link change and other phy events */
2839         if (!(tp->tg3_flags &
2840               (TG3_FLAG_USE_LINKCHG_REG |
2841                TG3_FLAG_POLL_SERDES))) {
2842                 if (sblk->status & SD_STATUS_LINK_CHG) {
2843                         sblk->status = SD_STATUS_UPDATED |
2844                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2845                         tg3_setup_phy(tp, 0);
2846                 }
2847         }
2848
2849         /* run TX completion thread */
2850         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2851                 spin_lock(&tp->tx_lock);
2852                 tg3_tx(tp);
2853                 spin_unlock(&tp->tx_lock);
2854         }
2855
2856         spin_unlock_irqrestore(&tp->lock, flags);
2857
2858         /* run RX thread, within the bounds set by NAPI.
2859          * All RX "locking" is done by ensuring outside
2860          * code synchronizes with dev->poll()
2861          */
2862         done = 1;
2863         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2864                 int orig_budget = *budget;
2865                 int work_done;
2866
2867                 if (orig_budget > netdev->quota)
2868                         orig_budget = netdev->quota;
2869
2870                 work_done = tg3_rx(tp, orig_budget);
2871
2872                 *budget -= work_done;
2873                 netdev->quota -= work_done;
2874
2875                 if (work_done >= orig_budget)
2876                         done = 0;
2877         }
2878
2879         /* if no more work, tell net stack and NIC we're done */
2880         if (done) {
2881                 spin_lock_irqsave(&tp->lock, flags);
2882                 __netif_rx_complete(netdev);
2883                 tg3_restart_ints(tp);
2884                 spin_unlock_irqrestore(&tp->lock, flags);
2885         }
2886
2887         return (done ? 0 : 1);
2888 }
2889
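/* Returns nonzero if the status block indicates pending work: a link
 * change (when link events are reported through the status block), a
 * TX completion, or a newly produced RX return-ring entry.
 */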
2890 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2891 {
2892         struct tg3_hw_status *sblk = tp->hw_status;
2893         unsigned int work_exists = 0;
2894
2895         /* check for phy events */
2896         if (!(tp->tg3_flags &
2897               (TG3_FLAG_USE_LINKCHG_REG |
2898                TG3_FLAG_POLL_SERDES))) {
2899                 if (sblk->status & SD_STATUS_LINK_CHG)
2900                         work_exists = 1;
2901         }
2902         /* check for RX/TX work to do */
2903         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2904             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2905                 work_exists = 1;
2906
2907         return work_exists;
2908 }
2909
2910 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2911 {
2912         struct net_device *dev = dev_id;
2913         struct tg3 *tp = netdev_priv(dev);
2914         struct tg3_hw_status *sblk = tp->hw_status;
2915         unsigned long flags;
2916         unsigned int handled = 1;
2917
2918         spin_lock_irqsave(&tp->lock, flags);
2919
2920         /* In INTx mode, it is possible for the interrupt to arrive at the
2921          * CPU before the status block posted prior to the interrupt has
2922          * reached host memory.  Reading the PCI State register will confirm
2923          * whether the interrupt is ours and will flush the status block.
2924          */
2925         if ((sblk->status & SD_STATUS_UPDATED) ||
2926             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2927                 /*
2928                  * writing any value to intr-mbox-0 clears PCI INTA# and
2929                  * chip-internal interrupt pending events.
2930                  * writing non-zero to intr-mbox-0 additionally tells the
2931                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2932                  * event coalescing.
2933                  */
2934                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2935                              0x00000001);
2936                 /*
2937                  * Flush PCI write.  This also guarantees that our
2938                  * status block has been flushed to host memory.
2939                  */
2940                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2941                 sblk->status &= ~SD_STATUS_UPDATED;
2942
2943                 if (likely(tg3_has_work(dev, tp)))
2944                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2945                 else {
2946                         /* no work, shared interrupt perhaps?  re-enable
2947                          * interrupts, and flush that PCI write
2948                          */
2949                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2950                                 0x00000000);
2951                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2952                 }
2953         } else {        /* shared interrupt */
2954                 handled = 0;
2955         }
2956
2957         spin_unlock_irqrestore(&tp->lock, flags);
2958
2959         return IRQ_RETVAL(handled);
2960 }
2961
2962 static int tg3_init_hw(struct tg3 *);
2963 static int tg3_halt(struct tg3 *);
2964
2965 #ifdef CONFIG_NET_POLL_CONTROLLER
2966 static void tg3_poll_controller(struct net_device *dev)
2967 {
2968         tg3_interrupt(dev->irq, dev, NULL);
2969 }
2970 #endif
2971
2972 static void tg3_reset_task(void *_data)
2973 {
2974         struct tg3 *tp = _data;
2975         unsigned int restart_timer;
2976
2977         tg3_netif_stop(tp);
2978
2979         spin_lock_irq(&tp->lock);
2980         spin_lock(&tp->tx_lock);
2981
2982         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2983         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2984
2985         tg3_halt(tp);
2986         tg3_init_hw(tp);
2987
2988         tg3_netif_start(tp);
2989
2990         spin_unlock(&tp->tx_lock);
2991         spin_unlock_irq(&tp->lock);
2992
2993         if (restart_timer)
2994                 mod_timer(&tp->timer, jiffies + 1);
2995 }
2996
2997 static void tg3_tx_timeout(struct net_device *dev)
2998 {
2999         struct tg3 *tp = netdev_priv(dev);
3000
3001         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3002                dev->name);
3003
3004         schedule_work(&tp->reset_task);
3005 }
3006
3007 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3008
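/* Work around the 4GB DMA boundary hardware bug: copy the offending
 * skb into a new linear skb, queue that copy as a single TX
 * descriptor, and unmap the original ring entries that had already
 * been set up between *start and last_plus_one.
 */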
3009 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3010                                        u32 guilty_entry, int guilty_len,
3011                                        u32 last_plus_one, u32 *start, u32 mss)
3012 {
3013         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3014         dma_addr_t new_addr;
3015         u32 entry = *start;
3016         int i;
3017
3018         if (!new_skb) {
3019                 dev_kfree_skb(skb);
3020                 return -1;
3021         }
3022
3023         /* New SKB is guaranteed to be linear. */
3024         entry = *start;
3025         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3026                                   PCI_DMA_TODEVICE);
3027         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3028                     (skb->ip_summed == CHECKSUM_HW) ?
3029                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3030         *start = NEXT_TX(entry);
3031
3032         /* Now clean up the sw ring entries. */
3033         i = 0;
3034         while (entry != last_plus_one) {
3035                 int len;
3036
3037                 if (i == 0)
3038                         len = skb_headlen(skb);
3039                 else
3040                         len = skb_shinfo(skb)->frags[i-1].size;
3041                 pci_unmap_single(tp->pdev,
3042                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3043                                  len, PCI_DMA_TODEVICE);
3044                 if (i == 0) {
3045                         tp->tx_buffers[entry].skb = new_skb;
3046                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3047                 } else {
3048                         tp->tx_buffers[entry].skb = NULL;
3049                 }
3050                 entry = NEXT_TX(entry);
3051                 i++;
3052         }
3053
3054         dev_kfree_skb(skb);
3055
3056         return 0;
3057 }
3058
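/* Fill one hardware TX descriptor: split the DMA address into high
 * and low halves, encode the length and flags, and pack the VLAN tag
 * and MSS into the vlan_tag word.  The low bit of mss_and_is_end
 * marks the final descriptor of the frame (TXD_FLAG_END).
 */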
3059 static void tg3_set_txd(struct tg3 *tp, int entry,
3060                         dma_addr_t mapping, int len, u32 flags,
3061                         u32 mss_and_is_end)
3062 {
3063         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3064         int is_end = (mss_and_is_end & 0x1);
3065         u32 mss = (mss_and_is_end >> 1);
3066         u32 vlan_tag = 0;
3067
3068         if (is_end)
3069                 flags |= TXD_FLAG_END;
3070         if (flags & TXD_FLAG_VLAN) {
3071                 vlan_tag = flags >> 16;
3072                 flags &= 0xffff;
3073         }
3074         vlan_tag |= (mss << TXD_MSS_SHIFT);
3075
3076         txd->addr_hi = ((u64) mapping >> 32);
3077         txd->addr_lo = ((u64) mapping & 0xffffffff);
3078         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3079         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3080 }
3081
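/* Returns true if a DMA buffer of 'len' bytes at 'mapping' would wrap
 * the low 32 bits of the address, i.e. cross a 4GB boundary, which the
 * chip cannot DMA across; a small guard band is included in the test.
 */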
3082 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3083 {
3084         u32 base = (u32) mapping & 0xffffffff;
3085
3086         return ((base > 0xffffdcc0) &&
3087                 (base + len + 8 < base));
3088 }
3089
3090 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3091 {
3092         struct tg3 *tp = netdev_priv(dev);
3093         dma_addr_t mapping;
3094         unsigned int i;
3095         u32 len, entry, base_flags, mss;
3096         int would_hit_hwbug;
3097         unsigned long flags;
3098
3099         len = skb_headlen(skb);
3100
3101         /* No BH disabling for tx_lock here.  We are running in BH disabled
3102          * context and TX reclaim runs via tp->poll inside of a software
3103          * interrupt.  Rejoice!
3104          *
3105          * Actually, things are not so simple.  If we are to take a hw
3106          * IRQ here, we can deadlock, consider:
3107          *
3108          *       CPU1           CPU2
3109          *   tg3_start_xmit
3110          *   take tp->tx_lock
3111          *                      tg3_timer
3112          *                      take tp->lock
3113          *   tg3_interrupt
3114          *   spin on tp->lock
3115          *                      spin on tp->tx_lock
3116          *
3117          * So we really do need to disable interrupts when taking
3118          * tx_lock here.
3119          */
3120         local_irq_save(flags);
3121         if (!spin_trylock(&tp->tx_lock)) { 
3122                 local_irq_restore(flags);
3123                 return NETDEV_TX_LOCKED; 
3124         } 
3125
3126         /* This is a hard error, log it. */
3127         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3128                 netif_stop_queue(dev);
3129                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3130                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3131                        dev->name);
3132                 return NETDEV_TX_BUSY;
3133         }
3134
3135         entry = tp->tx_prod;
3136         base_flags = 0;
3137         if (skb->ip_summed == CHECKSUM_HW)
3138                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3139 #if TG3_TSO_SUPPORT != 0
3140         mss = 0;
3141         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3142             (mss = skb_shinfo(skb)->tso_size) != 0) {
3143                 int tcp_opt_len, ip_tcp_len;
3144
3145                 if (skb_header_cloned(skb) &&
3146                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3147                         dev_kfree_skb(skb);
3148                         goto out_unlock;
3149                 }
3150
3151                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3152                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3153
3154                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3155                                TXD_FLAG_CPU_POST_DMA);
3156
3157                 skb->nh.iph->check = 0;
3158                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3159                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3160                         skb->h.th->check = 0;
3161                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3162                 }
3163                 else {
3164                         skb->h.th->check =
3165                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3166                                                    skb->nh.iph->daddr,
3167                                                    0, IPPROTO_TCP, 0);
3168                 }
3169
3170                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3171                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3172                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3173                                 int tsflags;
3174
3175                                 tsflags = ((skb->nh.iph->ihl - 5) +
3176                                            (tcp_opt_len >> 2));
3177                                 mss |= (tsflags << 11);
3178                         }
3179                 } else {
3180                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3181                                 int tsflags;
3182
3183                                 tsflags = ((skb->nh.iph->ihl - 5) +
3184                                            (tcp_opt_len >> 2));
3185                                 base_flags |= tsflags << 12;
3186                         }
3187                 }
3188         }
3189 #else
3190         mss = 0;
3191 #endif
3192 #if TG3_VLAN_TAG_USED
3193         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3194                 base_flags |= (TXD_FLAG_VLAN |
3195                                (vlan_tx_tag_get(skb) << 16));
3196 #endif
3197
3198         /* Queue skb data, a.k.a. the main skb fragment. */
3199         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3200
3201         tp->tx_buffers[entry].skb = skb;
3202         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3203
3204         would_hit_hwbug = 0;
3205
3206         if (tg3_4g_overflow_test(mapping, len))
3207                 would_hit_hwbug = entry + 1;
3208
3209         tg3_set_txd(tp, entry, mapping, len, base_flags,
3210                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3211
3212         entry = NEXT_TX(entry);
3213
3214         /* Now loop through additional data fragments, and queue them. */
3215         if (skb_shinfo(skb)->nr_frags > 0) {
3216                 unsigned int i, last;
3217
3218                 last = skb_shinfo(skb)->nr_frags - 1;
3219                 for (i = 0; i <= last; i++) {
3220                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3221
3222                         len = frag->size;
3223                         mapping = pci_map_page(tp->pdev,
3224                                                frag->page,
3225                                                frag->page_offset,
3226                                                len, PCI_DMA_TODEVICE);
3227
3228                         tp->tx_buffers[entry].skb = NULL;
3229                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3230
3231                         if (tg3_4g_overflow_test(mapping, len)) {
3232                                 /* Only one should match. */
3233                                 if (would_hit_hwbug)
3234                                         BUG();
3235                                 would_hit_hwbug = entry + 1;
3236                         }
3237
3238                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3239                                 tg3_set_txd(tp, entry, mapping, len,
3240                                             base_flags, (i == last)|(mss << 1));
3241                         else
3242                                 tg3_set_txd(tp, entry, mapping, len,
3243                                             base_flags, (i == last));
3244
3245                         entry = NEXT_TX(entry);
3246                 }
3247         }
3248
3249         if (would_hit_hwbug) {
3250                 u32 last_plus_one = entry;
3251                 u32 start;
3252                 unsigned int len = 0;
3253
3254                 would_hit_hwbug -= 1;
3255                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3256                 entry &= (TG3_TX_RING_SIZE - 1);
3257                 start = entry;
3258                 i = 0;
3259                 while (entry != last_plus_one) {
3260                         if (i == 0)
3261                                 len = skb_headlen(skb);
3262                         else
3263                                 len = skb_shinfo(skb)->frags[i-1].size;
3264
3265                         if (entry == would_hit_hwbug)
3266                                 break;
3267
3268                         i++;
3269                         entry = NEXT_TX(entry);
3270
3271                 }
3272
3273                 /* If the workaround fails due to memory/mapping
3274                  * failure, silently drop this packet.
3275                  */
3276                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3277                                                 entry, len,
3278                                                 last_plus_one,
3279                                                 &start, mss))
3280                         goto out_unlock;
3281
3282                 entry = start;
3283         }
3284
3285         /* Packets are ready, update Tx producer idx locally and on the card. */
3286         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3287
3288         tp->tx_prod = entry;
3289         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3290                 netif_stop_queue(dev);
3291
3292 out_unlock:
3293         mmiowb();
3294         spin_unlock_irqrestore(&tp->tx_lock, flags);
3295
3296         dev->trans_start = jiffies;
3297
3298         return NETDEV_TX_OK;
3299 }
3300
3301 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3302                                int new_mtu)
3303 {
3304         dev->mtu = new_mtu;
3305
3306         if (new_mtu > ETH_DATA_LEN)
3307                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3308         else
3309                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3310 }
3311
3312 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3313 {
3314         struct tg3 *tp = netdev_priv(dev);
3315
3316         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3317                 return -EINVAL;
3318
3319         if (!netif_running(dev)) {
3320                 /* We'll just catch it later when the
3321                  * device is up'd.
3322                  */
3323                 tg3_set_mtu(dev, tp, new_mtu);
3324                 return 0;
3325         }
3326
3327         tg3_netif_stop(tp);
3328         spin_lock_irq(&tp->lock);
3329         spin_lock(&tp->tx_lock);
3330
3331         tg3_halt(tp);
3332
3333         tg3_set_mtu(dev, tp, new_mtu);
3334
3335         tg3_init_hw(tp);
3336
3337         tg3_netif_start(tp);
3338
3339         spin_unlock(&tp->tx_lock);
3340         spin_unlock_irq(&tp->lock);
3341
3342         return 0;
3343 }
3344
3345 /* Free up pending packets in all rx/tx rings.
3346  *
3347  * The chip has been shut down and the driver detached from
3348  * the networking core, so no interrupts or new tx packets will
3349  * end up in the driver.  tp->{tx,}lock is not held and we are not
3350  * in an interrupt context and thus may sleep.
3351  */
3352 static void tg3_free_rings(struct tg3 *tp)
3353 {
3354         struct ring_info *rxp;
3355         int i;
3356
3357         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3358                 rxp = &tp->rx_std_buffers[i];
3359
3360                 if (rxp->skb == NULL)
3361                         continue;
3362                 pci_unmap_single(tp->pdev,
3363                                  pci_unmap_addr(rxp, mapping),
3364                                  RX_PKT_BUF_SZ - tp->rx_offset,
3365                                  PCI_DMA_FROMDEVICE);
3366                 dev_kfree_skb_any(rxp->skb);
3367                 rxp->skb = NULL;
3368         }
3369
3370         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3371                 rxp = &tp->rx_jumbo_buffers[i];
3372
3373                 if (rxp->skb == NULL)
3374                         continue;
3375                 pci_unmap_single(tp->pdev,
3376                                  pci_unmap_addr(rxp, mapping),
3377                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3378                                  PCI_DMA_FROMDEVICE);
3379                 dev_kfree_skb_any(rxp->skb);
3380                 rxp->skb = NULL;
3381         }
3382
3383         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3384                 struct tx_ring_info *txp;
3385                 struct sk_buff *skb;
3386                 int j;
3387
3388                 txp = &tp->tx_buffers[i];
3389                 skb = txp->skb;
3390
3391                 if (skb == NULL) {
3392                         i++;
3393                         continue;
3394                 }
3395
3396                 pci_unmap_single(tp->pdev,
3397                                  pci_unmap_addr(txp, mapping),
3398                                  skb_headlen(skb),
3399                                  PCI_DMA_TODEVICE);
3400                 txp->skb = NULL;
3401
3402                 i++;
3403
3404                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3405                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3406                         pci_unmap_page(tp->pdev,
3407                                        pci_unmap_addr(txp, mapping),
3408                                        skb_shinfo(skb)->frags[j].size,
3409                                        PCI_DMA_TODEVICE);
3410                         i++;
3411                 }
3412
3413                 dev_kfree_skb_any(skb);
3414         }
3415 }
3416
3417 /* Initialize tx/rx rings for packet processing.
3418  *
3419  * The chip has been shut down and the driver detached from
3420  * the networking core, so no interrupts or new tx packets will
3421  * end up in the driver.  tp->{tx,}lock are held and thus
3422  * we may not sleep.
3423  */
3424 static void tg3_init_rings(struct tg3 *tp)
3425 {
3426         u32 i;
3427
3428         /* Free up all the SKBs. */
3429         tg3_free_rings(tp);
3430
3431         /* Zero out all descriptors. */
3432         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3433         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3434         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3435         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3436
3437         /* Initialize invariants of the rings; we only set this
3438          * stuff once.  This works because the card does not
3439          * write into the rx buffer posting rings.
3440          */
3441         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3442                 struct tg3_rx_buffer_desc *rxd;
3443
3444                 rxd = &tp->rx_std[i];
3445                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3446                         << RXD_LEN_SHIFT;
3447                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3448                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3449                                (i << RXD_OPAQUE_INDEX_SHIFT));
3450         }
3451
3452         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3453                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3454                         struct tg3_rx_buffer_desc *rxd;
3455
3456                         rxd = &tp->rx_jumbo[i];
3457                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3458                                 << RXD_LEN_SHIFT;
3459                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3460                                 RXD_FLAG_JUMBO;
3461                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3462                                (i << RXD_OPAQUE_INDEX_SHIFT));
3463                 }
3464         }
3465
3466         /* Now allocate fresh SKBs for each rx ring. */
3467         for (i = 0; i < tp->rx_pending; i++) {
3468                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3469                                      -1, i) < 0)
3470                         break;
3471         }
3472
3473         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3474                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3475                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3476                                              -1, i) < 0)
3477                                 break;
3478                 }
3479         }
3480 }
3481
3482 /*
3483  * Must not be invoked with interrupt sources disabled and
3484  * the hardware shut down.
3485  */
3486 static void tg3_free_consistent(struct tg3 *tp)
3487 {
3488         if (tp->rx_std_buffers) {
3489                 kfree(tp->rx_std_buffers);
3490                 tp->rx_std_buffers = NULL;
3491         }
3492         if (tp->rx_std) {
3493                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3494                                     tp->rx_std, tp->rx_std_mapping);
3495                 tp->rx_std = NULL;
3496         }
3497         if (tp->rx_jumbo) {
3498                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3499                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3500                 tp->rx_jumbo = NULL;
3501         }
3502         if (tp->rx_rcb) {
3503                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3504                                     tp->rx_rcb, tp->rx_rcb_mapping);
3505                 tp->rx_rcb = NULL;
3506         }
3507         if (tp->tx_ring) {
3508                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3509                         tp->tx_ring, tp->tx_desc_mapping);
3510                 tp->tx_ring = NULL;
3511         }
3512         if (tp->hw_status) {
3513                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3514                                     tp->hw_status, tp->status_mapping);
3515                 tp->hw_status = NULL;
3516         }
3517         if (tp->hw_stats) {
3518                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3519                                     tp->hw_stats, tp->stats_mapping);
3520                 tp->hw_stats = NULL;
3521         }
3522 }
3523
3524 /*
3525  * Must not be invoked with interrupt sources disabled and
3526  * the hardware shut down.  Can sleep.
3527  */
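/* One kmalloc'd block provides the std RX, jumbo RX and TX bookkeeping
 * arrays; the descriptor rings, status block and statistics block are
 * each allocated as DMA-coherent memory.
 */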
3528 static int tg3_alloc_consistent(struct tg3 *tp)
3529 {
3530         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3531                                       (TG3_RX_RING_SIZE +
3532                                        TG3_RX_JUMBO_RING_SIZE)) +
3533                                      (sizeof(struct tx_ring_info) *
3534                                       TG3_TX_RING_SIZE),
3535                                      GFP_KERNEL);
3536         if (!tp->rx_std_buffers)
3537                 return -ENOMEM;
3538
3539         memset(tp->rx_std_buffers, 0,
3540                (sizeof(struct ring_info) *
3541                 (TG3_RX_RING_SIZE +
3542                  TG3_RX_JUMBO_RING_SIZE)) +
3543                (sizeof(struct tx_ring_info) *
3544                 TG3_TX_RING_SIZE));
3545
3546         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3547         tp->tx_buffers = (struct tx_ring_info *)
3548                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3549
3550         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3551                                           &tp->rx_std_mapping);
3552         if (!tp->rx_std)
3553                 goto err_out;
3554
3555         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3556                                             &tp->rx_jumbo_mapping);
3557
3558         if (!tp->rx_jumbo)
3559                 goto err_out;
3560
3561         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3562                                           &tp->rx_rcb_mapping);
3563         if (!tp->rx_rcb)
3564                 goto err_out;
3565
3566         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3567                                            &tp->tx_desc_mapping);
3568         if (!tp->tx_ring)
3569                 goto err_out;
3570
3571         tp->hw_status = pci_alloc_consistent(tp->pdev,
3572                                              TG3_HW_STATUS_SIZE,
3573                                              &tp->status_mapping);
3574         if (!tp->hw_status)
3575                 goto err_out;
3576
3577         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3578                                             sizeof(struct tg3_hw_stats),
3579                                             &tp->stats_mapping);
3580         if (!tp->hw_stats)
3581                 goto err_out;
3582
3583         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3584         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3585
3586         return 0;
3587
3588 err_out:
3589         tg3_free_consistent(tp);
3590         return -ENOMEM;
3591 }
3592
3593 #define MAX_WAIT_CNT 1000
3594
3595 /* To stop a block, clear the enable bit and poll till it
3596  * clears.  tp->lock is held.
3597  */
3598 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3599 {
3600         unsigned int i;
3601         u32 val;
3602
3603         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3604                 switch (ofs) {
3605                 case RCVLSC_MODE:
3606                 case DMAC_MODE:
3607                 case MBFREE_MODE:
3608                 case BUFMGR_MODE:
3609                 case MEMARB_MODE:
3610                         /* We can't enable/disable these bits of the
3611                          * 5705/5750, just say success.
3612                          */
3613                         return 0;
3614
3615                 default:
3616                         break;
3617                 };
3618         }
3619
3620         val = tr32(ofs);
3621         val &= ~enable_bit;
3622         tw32_f(ofs, val);
3623
3624         for (i = 0; i < MAX_WAIT_CNT; i++) {
3625                 udelay(100);
3626                 val = tr32(ofs);
3627                 if ((val & enable_bit) == 0)
3628                         break;
3629         }
3630
3631         if (i == MAX_WAIT_CNT) {
3632                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3633                        "ofs=%lx enable_bit=%x\n",
3634                        ofs, enable_bit);
3635                 return -ENODEV;
3636         }
3637
3638         return 0;
3639 }
3640
3641 /* tp->lock is held. */
3642 static int tg3_abort_hw(struct tg3 *tp)
3643 {
3644         int i, err;
3645
3646         tg3_disable_ints(tp);
3647
3648         tp->rx_mode &= ~RX_MODE_ENABLE;
3649         tw32_f(MAC_RX_MODE, tp->rx_mode);
3650         udelay(10);
3651
3652         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3653         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3654         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3655         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3656         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3657         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3658
3659         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3660         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3661         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3662         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3663         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3664         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3665         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3666         if (err)
3667                 goto out;
3668
3669         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3670         tw32_f(MAC_MODE, tp->mac_mode);
3671         udelay(40);
3672
3673         tp->tx_mode &= ~TX_MODE_ENABLE;
3674         tw32_f(MAC_TX_MODE, tp->tx_mode);
3675
3676         for (i = 0; i < MAX_WAIT_CNT; i++) {
3677                 udelay(100);
3678                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3679                         break;
3680         }
3681         if (i >= MAX_WAIT_CNT) {
3682                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3683                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3684                        tp->dev->name, tr32(MAC_TX_MODE));
3685                 return -ENODEV;
3686         }
3687
3688         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3689         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3690         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3691
3692         tw32(FTQ_RESET, 0xffffffff);
3693         tw32(FTQ_RESET, 0x00000000);
3694
3695         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3696         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3697         if (err)
3698                 goto out;
3699
3700         if (tp->hw_status)
3701                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3702         if (tp->hw_stats)
3703                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3704
3705 out:
3706         return err;
3707 }
3708
3709 /* tp->lock is held. */
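/* Request the NVRAM software arbitration grant (SWARB) before touching
 * the NVRAM interface, polling up to 8000 * 20us for the grant bit.
 */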
3710 static int tg3_nvram_lock(struct tg3 *tp)
3711 {
3712         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3713                 int i;
3714
3715                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3716                 for (i = 0; i < 8000; i++) {
3717                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3718                                 break;
3719                         udelay(20);
3720                 }
3721                 if (i == 8000)
3722                         return -ENODEV;
3723         }
3724         return 0;
3725 }
3726
3727 /* tp->lock is held. */
3728 static void tg3_nvram_unlock(struct tg3 *tp)
3729 {
3730         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3731                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3732 }
3733
3734 /* tp->lock is held. */
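/* Notify the on-chip firmware that a reset is imminent: post the
 * pre-reset magic value to the firmware mailbox and, when the newer
 * ASF handshake is in use, record the driver state matching 'kind'.
 */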
3735 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3736 {
3737         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3738                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3739                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3740
3741         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3742                 switch (kind) {
3743                 case RESET_KIND_INIT:
3744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3745                                       DRV_STATE_START);
3746                         break;
3747
3748                 case RESET_KIND_SHUTDOWN:
3749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3750                                       DRV_STATE_UNLOAD);
3751                         break;
3752
3753                 case RESET_KIND_SUSPEND:
3754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3755                                       DRV_STATE_SUSPEND);
3756                         break;
3757
3758                 default:
3759                         break;
3760                 };
3761         }
3762 }
3763
3764 /* tp->lock is held. */
3765 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3766 {
3767         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3768                 switch (kind) {
3769                 case RESET_KIND_INIT:
3770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3771                                       DRV_STATE_START_DONE);
3772                         break;
3773
3774                 case RESET_KIND_SHUTDOWN:
3775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3776                                       DRV_STATE_UNLOAD_DONE);
3777                         break;
3778
3779                 default:
3780                         break;
3781                 };
3782         }
3783 }
3784
3785 /* tp->lock is held. */
3786 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3787 {
3788         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3789                 switch (kind) {
3790                 case RESET_KIND_INIT:
3791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3792                                       DRV_STATE_START);
3793                         break;
3794
3795                 case RESET_KIND_SHUTDOWN:
3796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3797                                       DRV_STATE_UNLOAD);
3798                         break;
3799
3800                 case RESET_KIND_SUSPEND:
3801                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3802                                       DRV_STATE_SUSPEND);
3803                         break;
3804
3805                 default:
3806                         break;
3807                 };
3808         }
3809 }
3810
3811 static void tg3_stop_fw(struct tg3 *);
3812
3813 /* tp->lock is held. */
3814 static int tg3_chip_reset(struct tg3 *tp)
3815 {
3816         u32 val;
3817         u32 flags_save;
3818         int i;
3819
3820         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3821                 tg3_nvram_lock(tp);
3822
3823         /*
3824          * We must avoid the readl() that normally takes place.
3825          * It locks machines, causes machine checks, and other
3826          * fun things.  So, temporarily disable the 5701
3827          * hardware workaround, while we do the reset.
3828          */
3829         flags_save = tp->tg3_flags;
3830         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3831
3832         /* do the reset */
3833         val = GRC_MISC_CFG_CORECLK_RESET;
3834
3835         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3836                 if (tr32(0x7e2c) == 0x60) {
3837                         tw32(0x7e2c, 0x20);
3838                 }
3839                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3840                         tw32(GRC_MISC_CFG, (1 << 29));
3841                         val |= (1 << 29);
3842                 }
3843         }
3844
3845         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3846                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3847         tw32(GRC_MISC_CFG, val);
3848
3849         /* restore 5701 hardware bug workaround flag */
3850         tp->tg3_flags = flags_save;
3851
3852         /* Unfortunately, we have to delay before the PCI read back.
3853          * Some 575X chips will not even respond to a PCI cfg access
3854          * when the reset command is given to the chip.
3855          *
3856          * How do these hardware designers expect things to work
3857          * properly if the PCI write is posted for a long period
3858          * of time?  It is always necessary to have some method by
3859          * which a register read back can occur to push out the write
3860          * that performs the reset.
3861          *
3862          * For most tg3 variants the trick below was working.
3863          * Ho hum...
3864          */
3865         udelay(120);
3866
3867         /* Flush PCI posted writes.  The normal MMIO registers
3868          * are inaccessible at this time so this is the only
3869          * way to do this reliably (actually, this is no longer
3870          * the case, see above).  I tried to use indirect
3871          * register read/write but this upset some 5701 variants.
3872          */
3873         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3874
3875         udelay(120);
3876
3877         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3878                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3879                         int i;
3880                         u32 cfg_val;
3881
3882                         /* Wait for link training to complete.  */
3883                         for (i = 0; i < 5000; i++)
3884                                 udelay(100);
3885
3886                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3887                         pci_write_config_dword(tp->pdev, 0xc4,
3888                                                cfg_val | (1 << 15));
3889                 }
3890                 /* Set PCIE max payload size and clear error status.  */
3891                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3892         }
3893
3894         /* Re-enable indirect register accesses. */
3895         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3896                                tp->misc_host_ctrl);
3897
3898         /* Set MAX PCI retry to zero. */
3899         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3900         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3901             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3902                 val |= PCISTATE_RETRY_SAME_DMA;
3903         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3904
3905         pci_restore_state(tp->pdev);
3906
3907         /* Make sure PCI-X relaxed ordering bit is clear. */
3908         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3909         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3910         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3911
3912         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3913
3914         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3915                 tg3_stop_fw(tp);
3916                 tw32(0x5000, 0x400);
3917         }
3918
3919         tw32(GRC_MODE, tp->grc_mode);
3920
3921         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3922                 u32 val = tr32(0xc4);
3923
3924                 tw32(0xc4, val | (1 << 15));
3925         }
3926
3927         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3928             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3929                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3930                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3931                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3932                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3933         }
3934
3935         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3936                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3937                 tw32_f(MAC_MODE, tp->mac_mode);
3938         } else
3939                 tw32_f(MAC_MODE, 0);
3940         udelay(40);
3941
3942         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3943                 /* Wait for firmware initialization to complete. */
3944                 for (i = 0; i < 100000; i++) {
3945                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3946                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3947                                 break;
3948                         udelay(10);
3949                 }
3950                 if (i >= 100000) {
3951                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3952                                "firmware will not restart magic=%08x\n",
3953                                tp->dev->name, val);
3954                         return -ENODEV;
3955                 }
3956         }
3957
3958         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3959             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3960                 u32 val = tr32(0x7c00);
3961
3962                 tw32(0x7c00, val | (1 << 25));
3963         }
3964
3965         /* Reprobe ASF enable state.  */
3966         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3967         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3968         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3969         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3970                 u32 nic_cfg;
3971
3972                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3973                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3974                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3975                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
3976                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3977                 }
3978         }
3979
3980         return 0;
3981 }
3982
3983 /* tp->lock is held. */
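/* If ASF management firmware is running, ask it to pause: write
 * FWCMD_NICDRV_PAUSE_FW to the firmware command mailbox, raise the
 * RX CPU event, and briefly poll for the acknowledgement.
 */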
3984 static void tg3_stop_fw(struct tg3 *tp)
3985 {
3986         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3987                 u32 val;
3988                 int i;
3989
3990                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3991                 val = tr32(GRC_RX_CPU_EVENT);
3992                 val |= (1 << 14);
3993                 tw32(GRC_RX_CPU_EVENT, val);
3994
3995                 /* Wait for RX cpu to ACK the event.  */
3996                 for (i = 0; i < 100; i++) {
3997                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3998                                 break;
3999                         udelay(1);
4000                 }
4001         }
4002 }
4003
4004 /* tp->lock is held. */
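/* Full shutdown sequence: pause the firmware, post the pre-reset
 * signature, quiesce the DMA and MAC blocks, reset the chip, and
 * finally post the shutdown signatures.
 */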
4005 static int tg3_halt(struct tg3 *tp)
4006 {
4007         int err;
4008
4009         tg3_stop_fw(tp);
4010
4011         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4012
4013         tg3_abort_hw(tp);
4014         err = tg3_chip_reset(tp);
4015
4016         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4017         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4018
4019         if (err)
4020                 return err;
4021
4022         return 0;
4023 }
4024
4025 #define TG3_FW_RELEASE_MAJOR    0x0
4026 #define TG3_FW_RELASE_MINOR     0x0
4027 #define TG3_FW_RELEASE_FIX      0x0
4028 #define TG3_FW_START_ADDR       0x08000000
4029 #define TG3_FW_TEXT_ADDR        0x08000000
4030 #define TG3_FW_TEXT_LEN         0x9c0
4031 #define TG3_FW_RODATA_ADDR      0x080009c0
4032 #define TG3_FW_RODATA_LEN       0x60
4033 #define TG3_FW_DATA_ADDR        0x08000a40
4034 #define TG3_FW_DATA_LEN         0x20
4035 #define TG3_FW_SBSS_ADDR        0x08000a60
4036 #define TG3_FW_SBSS_LEN         0xc
4037 #define TG3_FW_BSS_ADDR         0x08000a70
4038 #define TG3_FW_BSS_LEN          0x10
4039
4040 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4041         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4042         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4043         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4044         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4045         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4046         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4047         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4048         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4049         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4050         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4051         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4052         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4053         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4054         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4055         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4056         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4057         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4058         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4059         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4060         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4061         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4062         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4063         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4064         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4065         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4066         0, 0, 0, 0, 0, 0,
4067         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4068         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4069         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4071         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4072         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4073         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4074         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4075         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4076         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4077         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4078         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4079         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4080         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4081         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4082         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4083         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4084         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4085         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4086         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4087         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4088         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4089         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4090         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4091         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4092         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4093         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4094         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4095         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4096         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4097         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4098         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4099         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4100         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4101         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4102         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4103         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4104         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4105         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4106         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4107         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4108         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4109         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4110         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4111         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4112         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4113         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4114         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4115         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4116         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4117         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4118         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4119         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4120         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4121         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4122         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4123         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4124         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4125         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4126         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4127         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4128         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4129         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4130         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4131         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4132 };
4133
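/* Firmware read-only data: mostly short ASCII tag strings used by the
 * on-chip firmware (e.g. "SwEvent0", "fatalErr", "MainCpuB").
 */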
4134 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4135         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4136         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4137         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4138         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4139         0x00000000
4140 };
4141
4142 #if 0 /* All zeros, don't eat up space with it. */
4143 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4144         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4145         0x00000000, 0x00000000, 0x00000000, 0x00000000
4146 };
4147 #endif
4148
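/* On-chip scratch SRAM windows into which the RX and TX CPU firmware
 * images are loaded before the corresponding CPU is released from halt.
 */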
4149 #define RX_CPU_SCRATCH_BASE     0x30000
4150 #define RX_CPU_SCRATCH_SIZE     0x04000
4151 #define TX_CPU_SCRATCH_BASE     0x34000
4152 #define TX_CPU_SCRATCH_SIZE     0x04000
4153
4154 /* tp->lock is held. */
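/* Halt the embedded RX or TX CPU by asserting CPU_MODE_HALT until the
 * mode register reflects the halted state.  The TX CPU does not exist
 * on 5705-class chips, hence the BUG() below.
 */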
4155 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4156 {
4157         int i;
4158
4159         if (offset == TX_CPU_BASE &&
4160             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4161                 BUG();
4162
4163         if (offset == RX_CPU_BASE) {
4164                 for (i = 0; i < 10000; i++) {
4165                         tw32(offset + CPU_STATE, 0xffffffff);
4166                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4167                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4168                                 break;
4169                 }
4170
4171                 tw32(offset + CPU_STATE, 0xffffffff);
4172                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4173                 udelay(10);
4174         } else {
4175                 for (i = 0; i < 10000; i++) {
4176                         tw32(offset + CPU_STATE, 0xffffffff);
4177                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4178                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4179                                 break;
4180                 }
4181         }
4182
4183         if (i >= 10000) {
4184                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4185                        "%s CPU\n",
4186                        tp->dev->name,
4187                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4188                 return -ENODEV;
4189         }
4190         return 0;
4191 }
4192
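/* Describes one firmware image: load address, length and contents of the
 * text, rodata and data sections.  A NULL *_data pointer means the
 * section is simply zero-filled.
 */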
4193 struct fw_info {
4194         unsigned int text_base;
4195         unsigned int text_len;
4196         u32 *text_data;
4197         unsigned int rodata_base;
4198         unsigned int rodata_len;
4199         u32 *rodata_data;
4200         unsigned int data_base;
4201         unsigned int data_len;
4202         u32 *data_data;
4203 };
4204
4205 /* tp->lock is held. */
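/* Halt the CPU at @cpu_base, clear its scratch area, then copy the text,
 * rodata and data sections described by @info into that scratch memory.
 * The caller is responsible for starting the CPU afterwards.
 */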
4206 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4207                                  int cpu_scratch_size, struct fw_info *info)
4208 {
4209         int err, i;
4210         u32 orig_tg3_flags = tp->tg3_flags;
4211         void (*write_op)(struct tg3 *, u32, u32);
4212
4213         if (cpu_base == TX_CPU_BASE &&
4214             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4215                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4216                        "TX cpu firmware on %s which is 5705 or later.\n",
4217                        tp->dev->name);
4218                 return -EINVAL;
4219         }
4220
4221         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4222                 write_op = tg3_write_mem;
4223         else
4224                 write_op = tg3_write_indirect_reg32;
4225
4226         /* Force use of PCI config space for indirect register
4227          * write calls.
4228          */
4229         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4230
4231         err = tg3_halt_cpu(tp, cpu_base);
4232         if (err)
4233                 goto out;
4234
4235         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4236                 write_op(tp, cpu_scratch_base + i, 0);
4237         tw32(cpu_base + CPU_STATE, 0xffffffff);
4238         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4239         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4240                 write_op(tp, (cpu_scratch_base +
4241                               (info->text_base & 0xffff) +
4242                               (i * sizeof(u32))),
4243                          (info->text_data ?
4244                           info->text_data[i] : 0));
4245         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4246                 write_op(tp, (cpu_scratch_base +
4247                               (info->rodata_base & 0xffff) +
4248                               (i * sizeof(u32))),
4249                          (info->rodata_data ?
4250                           info->rodata_data[i] : 0));
4251         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4252                 write_op(tp, (cpu_scratch_base +
4253                               (info->data_base & 0xffff) +
4254                               (i * sizeof(u32))),
4255                          (info->data_data ?
4256                           info->data_data[i] : 0));
4257
4258         err = 0;
4259
4260 out:
4261         tp->tg3_flags = orig_tg3_flags;
4262         return err;
4263 }
4264
4265 /* tp->lock is held. */
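/* Load the 5701 A0 workaround firmware into both CPU scratch areas and
 * then boot only the RX CPU at TG3_FW_TEXT_ADDR.
 */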
4266 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4267 {
4268         struct fw_info info;
4269         int err, i;
4270
4271         info.text_base = TG3_FW_TEXT_ADDR;
4272         info.text_len = TG3_FW_TEXT_LEN;
4273         info.text_data = &tg3FwText[0];
4274         info.rodata_base = TG3_FW_RODATA_ADDR;
4275         info.rodata_len = TG3_FW_RODATA_LEN;
4276         info.rodata_data = &tg3FwRodata[0];
4277         info.data_base = TG3_FW_DATA_ADDR;
4278         info.data_len = TG3_FW_DATA_LEN;
4279         info.data_data = NULL;
4280
4281         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4282                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4283                                     &info);
4284         if (err)
4285                 return err;
4286
4287         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4288                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4289                                     &info);
4290         if (err)
4291                 return err;
4292
4293         /* Now start up only the RX CPU. */
4294         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4295         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4296
4297         for (i = 0; i < 5; i++) {
4298                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4299                         break;
4300                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4301                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4302                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4303                 udelay(1000);
4304         }
4305         if (i >= 5) {
4306                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX CPU PC "
4307                        "on %s: got %08x, want %08x\n",
4308                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4309                        TG3_FW_TEXT_ADDR);
4310                 return -ENODEV;
4311         }
4312         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4313         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4314
4315         return 0;
4316 }
4317
4318 #if TG3_TSO_SUPPORT != 0
4319
4320 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4321 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4322 #define TG3_TSO_FW_RELEASE_FIX          0x0
4323 #define TG3_TSO_FW_START_ADDR           0x08000000
4324 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4325 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4326 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4327 #define TG3_TSO_FW_RODATA_LEN           0x60
4328 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4329 #define TG3_TSO_FW_DATA_LEN             0x30
4330 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4331 #define TG3_TSO_FW_SBSS_LEN             0x2c
4332 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4333 #define TG3_TSO_FW_BSS_LEN              0x894
4334
4335 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4336         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4337         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4338         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4339         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4340         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4341         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4342         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4343         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4344         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4345         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4346         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4347         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4348         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4349         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4350         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4351         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4352         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4353         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4354         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4355         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4356         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4357         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4358         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4359         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4360         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4361         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4362         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4363         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4364         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4365         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4366         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4367         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4368         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4369         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4370         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4371         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4372         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4373         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4374         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4375         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4376         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4377         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4378         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4379         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4380         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4381         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4382         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4383         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4384         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4385         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4386         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4387         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4388         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4389         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4390         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4391         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4392         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4393         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4394         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4395         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4396         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4397         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4398         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4399         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4400         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4401         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4402         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4403         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4404         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4405         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4406         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4407         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4408         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4409         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4410         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4411         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4412         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4413         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4414         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4415         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4416         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4417         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4418         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4419         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4420         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4421         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4422         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4423         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4424         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4425         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4426         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4427         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4428         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4429         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4430         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4431         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4432         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4433         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4434         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4435         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4436         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4437         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4438         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4439         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4440         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4441         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4442         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4443         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4444         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4445         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4446         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4447         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4448         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4449         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4450         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4451         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4452         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4453         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4454         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4455         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4456         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4457         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4458         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4459         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4460         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4461         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4462         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4463         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4464         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4465         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4466         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4467         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4468         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4469         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4470         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4471         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4472         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4473         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4474         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4475         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4476         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4477         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4478         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4479         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4480         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4481         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4482         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4483         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4484         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4485         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4486         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4487         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4488         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4489         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4490         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4491         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4492         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4493         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4494         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4495         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4496         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4497         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4498         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4499         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4500         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4501         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4502         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4503         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4504         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4505         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4506         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4507         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4508         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4509         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4510         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4511         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4512         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4513         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4514         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4515         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4516         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4517         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4518         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4519         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4520         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4521         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4522         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4523         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4524         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4525         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4526         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4527         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4528         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4529         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4530         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4531         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4532         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4533         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4534         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4535         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4536         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4537         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4538         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4539         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4540         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4541         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4542         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4543         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4544         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4545         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4546         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4547         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4548         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4549         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4550         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4551         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4552         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4553         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4554         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4555         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4556         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4557         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4558         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4559         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4560         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4561         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4562         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4563         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4564         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4565         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4566         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4567         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4568         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4569         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4570         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4571         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4572         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4573         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4574         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4575         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4576         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4577         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4578         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4579         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4580         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4581         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4582         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4583         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4584         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4585         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4586         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4587         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4588         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4589         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4590         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4591         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4592         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4593         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4594         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4595         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4596         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4597         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4598         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4599         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4600         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4601         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4602         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4603         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4604         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4605         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4606         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4607         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4608         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4609         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4610         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4611         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4612         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4613         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4614         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4615         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4616         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4617         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4618         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4619         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4620 };
4621
4622 static u32 tg3TsoFwRodata[] = {
4623         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4624         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4625         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4626         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4627         0x00000000,
4628 };
4629
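/* Firmware initialized data; contains the version string "stkoffld_v1.6.0". */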
4630 static u32 tg3TsoFwData[] = {
4631         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4632         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4633         0x00000000,
4634 };
4635
4636 /* 5705 needs a special version of the TSO firmware.  */
4637 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4638 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4639 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4640 #define TG3_TSO5_FW_START_ADDR          0x00010000
4641 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4642 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4643 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4644 #define TG3_TSO5_FW_RODATA_LEN          0x50
4645 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4646 #define TG3_TSO5_FW_DATA_LEN            0x20
4647 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4648 #define TG3_TSO5_FW_SBSS_LEN            0x28
4649 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4650 #define TG3_TSO5_FW_BSS_LEN             0x88
4651
4652 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4653         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4654         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4655         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4656         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4657         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4658         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4659         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4660         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4661         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4662         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4663         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4664         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4665         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4666         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4667         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4668         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4669         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4670         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4671         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4672         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4673         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4674         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4675         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4676         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4677         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4678         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4679         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4680         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4681         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4682         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4683         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4684         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4685         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4686         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4687         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4688         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4689         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4690         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4691         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4692         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4693         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4694         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4695         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4696         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4697         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4698         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4699         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4700         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4701         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4702         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4703         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4704         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4705         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4706         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4707         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4708         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4709         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4710         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4711         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4712         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4713         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4714         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4715         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4716         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4717         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4718         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4719         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4720         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4721         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4722         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4723         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4724         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4725         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4726         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4727         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4728         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4729         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4730         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4731         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4732         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4733         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4734         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4735         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4736         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4737         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4738         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4739         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4740         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4741         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4742         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4743         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4744         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4745         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4746         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4747         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4748         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4749         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4750         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4751         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4752         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4753         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4754         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4755         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4756         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4757         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4758         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4759         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4760         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4761         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4762         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4763         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4764         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4765         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4766         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4767         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4768         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4769         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4770         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4771         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4772         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4773         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4774         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4775         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4776         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4777         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4778         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4779         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4780         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4781         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4782         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4783         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4784         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4785         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4786         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4787         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4788         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4789         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4790         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4791         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4792         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4793         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4794         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4795         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4796         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4797         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4798         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4799         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4800         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4801         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4802         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4803         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4804         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4805         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4806         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4807         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4808         0x00000000, 0x00000000, 0x00000000,
4809 };
4810
4811 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4812         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4813         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4814         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4815         0x00000000, 0x00000000, 0x00000000,
4816 };
4817
4818 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4819         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4820         0x00000000, 0x00000000, 0x00000000,
4821 };
4822
4823 /* tp->lock is held. */
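/* Load the TSO offload firmware.  Chips with hardware TSO need no firmware;
 * 5705-class chips run a smaller image on the RX CPU out of SRAM carved
 * from the MBUF pool, while other chips load the full image into the TX
 * CPU scratch area.  The CPU is then started at the firmware entry point.
 */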
4824 static int tg3_load_tso_firmware(struct tg3 *tp)
4825 {
4826         struct fw_info info;
4827         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4828         int err, i;
4829
4830         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4831                 return 0;
4832
4833         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4834                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4835                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4836                 info.text_data = &tg3Tso5FwText[0];
4837                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4838                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4839                 info.rodata_data = &tg3Tso5FwRodata[0];
4840                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4841                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4842                 info.data_data = &tg3Tso5FwData[0];
4843                 cpu_base = RX_CPU_BASE;
4844                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4845                 cpu_scratch_size = (info.text_len +
4846                                     info.rodata_len +
4847                                     info.data_len +
4848                                     TG3_TSO5_FW_SBSS_LEN +
4849                                     TG3_TSO5_FW_BSS_LEN);
4850         } else {
4851                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4852                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4853                 info.text_data = &tg3TsoFwText[0];
4854                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4855                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4856                 info.rodata_data = &tg3TsoFwRodata[0];
4857                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4858                 info.data_len = TG3_TSO_FW_DATA_LEN;
4859                 info.data_data = &tg3TsoFwData[0];
4860                 cpu_base = TX_CPU_BASE;
4861                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4862                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4863         }
4864
4865         err = tg3_load_firmware_cpu(tp, cpu_base,
4866                                     cpu_scratch_base, cpu_scratch_size,
4867                                     &info);
4868         if (err)
4869                 return err;
4870
4871         /* Now start up the CPU. */
4872         tw32(cpu_base + CPU_STATE, 0xffffffff);
4873         tw32_f(cpu_base + CPU_PC,    info.text_base);
4874
4875         for (i = 0; i < 5; i++) {
4876                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4877                         break;
4878                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4879                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4880                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4881                 udelay(1000);
4882         }
4883         if (i >= 5) {
4884                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
4885                        "on %s: got %08x, want %08x\n",
4886                        tp->dev->name, tr32(cpu_base + CPU_PC),
4887                        info.text_base);
4888                 return -ENODEV;
4889         }
4890         tw32(cpu_base + CPU_STATE, 0xffffffff);
4891         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4892         return 0;
4893 }
4894
4895 #endif /* TG3_TSO_SUPPORT != 0 */
4896
4897 /* tp->lock is held. */
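/* Program the station address into all four MAC address slots (plus the
 * twelve extended slots on 5703/5704) and seed the TX backoff generator
 * from the byte sum of the address.
 */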
4898 static void __tg3_set_mac_addr(struct tg3 *tp)
4899 {
4900         u32 addr_high, addr_low;
4901         int i;
4902
4903         addr_high = ((tp->dev->dev_addr[0] << 8) |
4904                      tp->dev->dev_addr[1]);
4905         addr_low = ((tp->dev->dev_addr[2] << 24) |
4906                     (tp->dev->dev_addr[3] << 16) |
4907                     (tp->dev->dev_addr[4] <<  8) |
4908                     (tp->dev->dev_addr[5] <<  0));
4909         for (i = 0; i < 4; i++) {
4910                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4911                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4912         }
4913
4914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4916                 for (i = 0; i < 12; i++) {
4917                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4918                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4919                 }
4920         }
4921
4922         addr_high = (tp->dev->dev_addr[0] +
4923                      tp->dev->dev_addr[1] +
4924                      tp->dev->dev_addr[2] +
4925                      tp->dev->dev_addr[3] +
4926                      tp->dev->dev_addr[4] +
4927                      tp->dev->dev_addr[5]) &
4928                 TX_BACKOFF_SEED_MASK;
4929         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4930 }
4931
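/* Driver entry point for changing the MAC address: copy the new address
 * into the netdev and reprogram the MAC registers under tp->lock.
 */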
4932 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4933 {
4934         struct tg3 *tp = netdev_priv(dev);
4935         struct sockaddr *addr = p;
4936
4937         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4938
4939         spin_lock_irq(&tp->lock);
4940         __tg3_set_mac_addr(tp);
4941         spin_unlock_irq(&tp->lock);
4942
4943         return 0;
4944 }
4945
4946 /* tp->lock is held. */
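/* Write a ring's buffer descriptor info block into NIC memory: the 64-bit
 * host DMA address (high word first), the maxlen/flags word and, on
 * pre-5705 chips, the NIC-local ring address.
 */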
4947 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4948                            dma_addr_t mapping, u32 maxlen_flags,
4949                            u32 nic_addr)
4950 {
4951         tg3_write_mem(tp,
4952                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4953                       ((u64) mapping >> 32));
4954         tg3_write_mem(tp,
4955                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4956                       ((u64) mapping & 0xffffffff));
4957         tg3_write_mem(tp,
4958                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4959                        maxlen_flags);
4960
4961         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4962                 tg3_write_mem(tp,
4963                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4964                               nic_addr);
4965 }
4966
4967 static void __tg3_set_rx_mode(struct net_device *);
4968
4969 /* tp->lock is held. */
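/* Full hardware (re)initialization: stop the run-time firmware, quiesce
 * the hardware if it was up, reset the chip, then reprogram clock control,
 * descriptor rings, GRC mode and the buffer manager watermarks.
 */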
4970 static int tg3_reset_hw(struct tg3 *tp)
4971 {
4972         u32 val, rdmac_mode;
4973         int i, err, limit;
4974
4975         tg3_disable_ints(tp);
4976
4977         tg3_stop_fw(tp);
4978
4979         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4980
4981         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4982                 err = tg3_abort_hw(tp);
4983                 if (err)
4984                         return err;
4985         }
4986
4987         err = tg3_chip_reset(tp);
4988         if (err)
4989                 return err;
4990
4991         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4992
4993         /* This works around an issue with Athlon chipsets on
4994          * B3 tigon3 silicon.  This bit has no effect on any
4995          * other revision.  But do not set this on PCI Express
4996          * chips.
4997          */
4998         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4999                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5000         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5001
5002         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5003             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5004                 val = tr32(TG3PCI_PCISTATE);
5005                 val |= PCISTATE_RETRY_SAME_DMA;
5006                 tw32(TG3PCI_PCISTATE, val);
5007         }
5008
5009         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5010                 /* Enable some hardware fixes for 5704 BX revisions.  */
5011                 val = tr32(TG3PCI_MSI_DATA);
5012                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5013                 tw32(TG3PCI_MSI_DATA, val);
5014         }
5015
5016         /* Descriptor ring init may make accesses to the
5017          * NIC SRAM area to setup the TX descriptors, so we
5018          * can only do this after the hardware has been
5019          * successfully reset.
5020          */
5021         tg3_init_rings(tp);
5022
5023         /* This value is determined during the probe time DMA
5024          * engine test, tg3_test_dma.
5025          */
5026         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5027
5028         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5029                           GRC_MODE_4X_NIC_SEND_RINGS |
5030                           GRC_MODE_NO_TX_PHDR_CSUM |
5031                           GRC_MODE_NO_RX_PHDR_CSUM);
5032         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5033         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5034                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5035         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5036                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5037
5038         tw32(GRC_MODE,
5039              tp->grc_mode |
5040              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5041
5042         /* Set up the timer prescaler register.  The core clock is always 66 MHz. */
5043         val = tr32(GRC_MISC_CFG);
5044         val &= ~0xff;
5045         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5046         tw32(GRC_MISC_CFG, val);
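             /* With the prescaler field set to 65 this presumably divides
              * the 66 MHz core clock by 65 + 1, giving the coalescing
              * timers a 1 usec (1 MHz) time base.
              */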
5047
5048         /* Initialize MBUF/DESC pool. */
5049         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5050                 /* Do nothing.  */
5051         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5052                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5053                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5054                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5055                 else
5056                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5057                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5058                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5059         }
5060 #if TG3_TSO_SUPPORT != 0
5061         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5062                 int fw_len;
5063
5064                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5065                           TG3_TSO5_FW_RODATA_LEN +
5066                           TG3_TSO5_FW_DATA_LEN +
5067                           TG3_TSO5_FW_SBSS_LEN +
5068                           TG3_TSO5_FW_BSS_LEN);
5069                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5070                 tw32(BUFMGR_MB_POOL_ADDR,
5071                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5072                 tw32(BUFMGR_MB_POOL_SIZE,
5073                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5074         }
5075 #endif
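             /* On TSO-capable 5705-class chips the TSO firmware image sits
              * at the bottom of the MBUF pool region, so the pool base is
              * advanced past the firmware (rounded up to 128 bytes) and the
              * pool size shrinks by that amount plus 0xa00 bytes, which is
              * presumably scratch space reserved for the firmware.
              */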
5076
5077         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5078                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5079                      tp->bufmgr_config.mbuf_read_dma_low_water);
5080                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5081                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5082                 tw32(BUFMGR_MB_HIGH_WATER,
5083                      tp->bufmgr_config.mbuf_high_water);
5084         } else {
5085                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5086                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5087                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5088                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5089                 tw32(BUFMGR_MB_HIGH_WATER,
5090                      tp->bufmgr_config.mbuf_high_water_jumbo);
5091         }
5092         tw32(BUFMGR_DMA_LOW_WATER,
5093              tp->bufmgr_config.dma_low_water);
5094         tw32(BUFMGR_DMA_HIGH_WATER,
5095              tp->bufmgr_config.dma_high_water);
5096
5097         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5098         for (i = 0; i < 2000; i++) {
5099                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5100                         break;
5101                 udelay(10);
5102         }
5103         if (i >= 2000) {
5104                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5105                        tp->dev->name);
5106                 return -ENODEV;
5107         }
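             /* The poll above gives the buffer manager up to
              * 2000 * 10 usec = ~20 msec to report itself enabled
              * before we give up and fail the reset.
              */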
5108
5109         /* Setup replenish threshold. */
5110         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5111
5112         /* Initialize TG3_BDINFO's at:
5113          *  RCVDBDI_STD_BD:     standard eth size rx ring
5114          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5115          *  RCVDBDI_MINI_BD:    small frame rx ring (apparently non-functional; left disabled below)
5116          *
5117          * like so:
5118          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5119          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5120          *                              ring attribute flags
5121          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5122          *
5123          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5124          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5125          *
5126          * The size of each ring is fixed in the firmware, but the location is
5127          * configurable.
5128          */
5129         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5130              ((u64) tp->rx_std_mapping >> 32));
5131         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5132              ((u64) tp->rx_std_mapping & 0xffffffff));
5133         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5134              NIC_SRAM_RX_BUFFER_DESC);
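             /* Roughly, each TG3_BDINFO block written above and below is
              * laid out as (tg3.h has the authoritative offsets):
              *
              *     host ring address, high 32 bits
              *     host ring address, low 32 bits
              *     (max buffer length << 16) | ring flags
              *     ring address in NIC SRAM
              */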
5135
5136         /* Don't even try to program the JUMBO/MINI buffer descriptor
5137          * configs on 5705.
5138          */
5139         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5140                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5141                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5142         } else {
5143                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5144                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5145
5146                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5147                      BDINFO_FLAGS_DISABLED);
5148
5149                 /* Setup replenish threshold. */
5150                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5151
5152                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5153                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5154                              ((u64) tp->rx_jumbo_mapping >> 32));
5155                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5156                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5157                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5158                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5159                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5160                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5161                 } else {
5162                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5163                              BDINFO_FLAGS_DISABLED);
5164                 }
5165
5166         }
5167
5168         /* There is only one send ring on 5705/5750, no need to explicitly
5169          * disable the others.
5170          */
5171         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5172                 /* Clear out send RCB ring in SRAM. */
5173                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5174                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5175                                       BDINFO_FLAGS_DISABLED);
5176         }
5177
5178         tp->tx_prod = 0;
5179         tp->tx_cons = 0;
5180         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5181         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5182
5183         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5184                        tp->tx_desc_mapping,
5185                        (TG3_TX_RING_SIZE <<
5186                         BDINFO_FLAGS_MAXLEN_SHIFT),
5187                        NIC_SRAM_TX_BUFFER_DESC);
5188
5189         /* There is only one receive return ring on 5705/5750, no need
5190          * to explicitly disable the others.
5191          */
5192         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5193                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5194                      i += TG3_BDINFO_SIZE) {
5195                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5196                                       BDINFO_FLAGS_DISABLED);
5197                 }
5198         }
5199
5200         tp->rx_rcb_ptr = 0;
5201         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5202
5203         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5204                        tp->rx_rcb_mapping,
5205                        (TG3_RX_RCB_RING_SIZE(tp) <<
5206                         BDINFO_FLAGS_MAXLEN_SHIFT),
5207                        0);
5208
5209         tp->rx_std_ptr = tp->rx_pending;
5210         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5211                      tp->rx_std_ptr);
5212
5213         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5214                                                 tp->rx_jumbo_pending : 0;
5215         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5216                      tp->rx_jumbo_ptr);
5217
5218         /* Initialize MAC address and backoff seed. */
5219         __tg3_set_mac_addr(tp);
5220
5221         /* MTU + ethernet header + FCS + optional VLAN tag */
5222         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5223
5224         /* The slot time is changed by tg3_setup_phy if we
5225          * run at gigabit with half duplex.
5226          */
5227         tw32(MAC_TX_LENGTHS,
5228              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5229              (6 << TX_LENGTHS_IPG_SHIFT) |
5230              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5231
5232         /* Receive rules. */
5233         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5234         tw32(RCVLPC_CONFIG, 0x0181);
5235
5236         /* Calculate the RDMAC_MODE setting early; we need it below to
5237          * determine the RCVLPC_STATS_ENABLE mask.
5238          */
5239         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5240                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5241                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5242                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5243                       RDMAC_MODE_LNGREAD_ENAB);
5244         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5245                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5246
5247         /* If statement applies to 5705 and 5750 PCI devices only */
5248         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5249              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5250             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5251                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5252                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5253                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5254                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5255                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5256                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5257                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5258                 }
5259         }
5260
5261         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5262                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5263
5264 #if TG3_TSO_SUPPORT != 0
5265         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5266                 rdmac_mode |= (1 << 27);
5267 #endif
5268
5269         /* Receive/send statistics. */
5270         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5271             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5272                 val = tr32(RCVLPC_STATS_ENABLE);
5273                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5274                 tw32(RCVLPC_STATS_ENABLE, val);
5275         } else {
5276                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5277         }
5278         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5279         tw32(SNDDATAI_STATSENAB, 0xffffff);
5280         tw32(SNDDATAI_STATSCTRL,
5281              (SNDDATAI_SCTRL_ENABLE |
5282               SNDDATAI_SCTRL_FASTUPD));
5283
5284         /* Setup host coalescing engine. */
5285         tw32(HOSTCC_MODE, 0);
5286         for (i = 0; i < 2000; i++) {
5287                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5288                         break;
5289                 udelay(10);
5290         }
5291
5292         tw32(HOSTCC_RXCOL_TICKS, 0);
5293         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5294         tw32(HOSTCC_RXMAX_FRAMES, 1);
5295         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5296         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5297                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5298                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5299         }
5300         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5301         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
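             /* With the rx coalescing tick count at 0 and the rx frame
              * threshold at 1, the chip appears to interrupt for nearly
              * every received frame; tx completions are moderated a bit
              * more by the LOW_* values above.
              */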
5302
5303         /* set status block DMA address */
5304         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5305              ((u64) tp->status_mapping >> 32));
5306         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5307              ((u64) tp->status_mapping & 0xffffffff));
5308
5309         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5310                 /* Status/statistics block address.  See tg3_timer,
5311                  * the tg3_periodic_fetch_stats call there, and
5312                  * tg3_get_stats to see how this works for 5705/5750 chips.
5313                  */
5314                 tw32(HOSTCC_STAT_COAL_TICKS,
5315                      DEFAULT_STAT_COAL_TICKS);
5316                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5317                      ((u64) tp->stats_mapping >> 32));
5318                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5319                      ((u64) tp->stats_mapping & 0xffffffff));
5320                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5321                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5322         }
5323
5324         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5325
5326         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5327         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5328         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5329                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5330
5331         /* Clear statistics/status block in chip, and status block in ram. */
5332         for (i = NIC_SRAM_STATS_BLK;
5333              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5334              i += sizeof(u32)) {
5335                 tg3_write_mem(tp, i, 0);
5336                 udelay(40);
5337         }
5338         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5339
5340         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5341                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5342         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5343         udelay(40);
5344
5345         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5346          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5347          * register to preserve the GPIO settings for LOMs. The GPIOs,
5348          * whether used as inputs or outputs, are set by boot code after
5349          * reset.
5350          */
5351         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5352                 u32 gpio_mask;
5353
5354                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5355                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5356
5357                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5358                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5359                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5360
5361                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5362
5363                 /* GPIO1 must be driven high for eeprom write protect */
5364                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5365                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5366         }
5367         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5368         udelay(100);
5369
5370         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5371         tr32(MAILBOX_INTERRUPT_0);
5372
5373         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5374                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5375                 udelay(40);
5376         }
5377
5378         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5379                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5380                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5381                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5382                WDMAC_MODE_LNGREAD_ENAB);
5383
5384         /* If statement applies to 5705 and 5750 PCI devices only */
5385         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5386              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5387             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5388                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5389                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5390                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5391                         /* nothing */
5392                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5393                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5394                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5395                         val |= WDMAC_MODE_RX_ACCEL;
5396                 }
5397         }
5398
5399         tw32_f(WDMAC_MODE, val);
5400         udelay(40);
5401
5402         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5403                 val = tr32(TG3PCI_X_CAPS);
5404                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5405                         val &= ~PCIX_CAPS_BURST_MASK;
5406                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5407                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5408                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5409                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5410                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5411                                 val |= (tp->split_mode_max_reqs <<
5412                                         PCIX_CAPS_SPLIT_SHIFT);
5413                 }
5414                 tw32(TG3PCI_X_CAPS, val);
5415         }
5416
5417         tw32_f(RDMAC_MODE, rdmac_mode);
5418         udelay(40);
5419
5420         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5421         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5422                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5423         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5424         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5425         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5426         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5427         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5428 #if TG3_TSO_SUPPORT != 0
5429         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5430                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5431 #endif
5432         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5433         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5434
5435         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5436                 err = tg3_load_5701_a0_firmware_fix(tp);
5437                 if (err)
5438                         return err;
5439         }
5440
5441 #if TG3_TSO_SUPPORT != 0
5442         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5443                 err = tg3_load_tso_firmware(tp);
5444                 if (err)
5445                         return err;
5446         }
5447 #endif
5448
5449         tp->tx_mode = TX_MODE_ENABLE;
5450         tw32_f(MAC_TX_MODE, tp->tx_mode);
5451         udelay(100);
5452
5453         tp->rx_mode = RX_MODE_ENABLE;
5454         tw32_f(MAC_RX_MODE, tp->rx_mode);
5455         udelay(10);
5456
5457         if (tp->link_config.phy_is_low_power) {
5458                 tp->link_config.phy_is_low_power = 0;
5459                 tp->link_config.speed = tp->link_config.orig_speed;
5460                 tp->link_config.duplex = tp->link_config.orig_duplex;
5461                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5462         }
5463
5464         tp->mi_mode = MAC_MI_MODE_BASE;
5465         tw32_f(MAC_MI_MODE, tp->mi_mode);
5466         udelay(80);
5467
5468         tw32(MAC_LED_CTRL, tp->led_ctrl);
5469
5470         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5471         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5472                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5473                 udelay(10);
5474         }
5475         tw32_f(MAC_RX_MODE, tp->rx_mode);
5476         udelay(10);
5477
5478         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5479                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5480                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5481                         /* Set drive transmission level to 1.2V  */
5482                         /* only if the signal pre-emphasis bit is not set  */
5483                         val = tr32(MAC_SERDES_CFG);
5484                         val &= 0xfffff000;
5485                         val |= 0x880;
5486                         tw32(MAC_SERDES_CFG, val);
5487                 }
5488                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5489                         tw32(MAC_SERDES_CFG, 0x616000);
5490         }
5491
5492         /* Prevent chip from dropping frames when flow control
5493          * is enabled.
5494          */
5495         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5496
5497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5498             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5499                 /* Use hardware link auto-negotiation */
5500                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5501         }
5502
5503         err = tg3_setup_phy(tp, 1);
5504         if (err)
5505                 return err;
5506
5507         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5508                 u32 tmp;
5509
5510                 /* Clear CRC stats. */
5511                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5512                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5513                         tg3_readphy(tp, 0x14, &tmp);
5514                 }
5515         }
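             /* Setting bit 15 of PHY register 0x1e and then reading
              * register 0x14 appears to latch and clear the PHY's CRC
              * error counter; calc_crc_errors() below uses the same
              * sequence to accumulate the count.
              */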
5516
5517         __tg3_set_rx_mode(tp->dev);
5518
5519         /* Initialize receive rules. */
5520         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5521         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5522         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5523         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5524
5525         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5526                 limit = 8;
5527         else
5528                 limit = 16;
5529         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5530                 limit -= 4;
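             /* Each case below deliberately falls through, disabling every
              * receive rule from (limit - 1) down to rule 4.  Rules 0 and 1
              * were programmed above; when ASF is enabled the top four
              * rules are left alone, presumably for the management
              * firmware's use.
              */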
5531         switch (limit) {
5532         case 16:
5533                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5534         case 15:
5535                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5536         case 14:
5537                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5538         case 13:
5539                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5540         case 12:
5541                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5542         case 11:
5543                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5544         case 10:
5545                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5546         case 9:
5547                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5548         case 8:
5549                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5550         case 7:
5551                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5552         case 6:
5553                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5554         case 5:
5555                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5556         case 4:
5557                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5558         case 3:
5559                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5560         case 2:
5561         case 1:
5562
5563         default:
5564                 break;
5565         }
5566
5567         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5568
5569         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5570                 tg3_enable_ints(tp);
5571
5572         return 0;
5573 }
5574
5575 /* Called at device open time to get the chip ready for
5576  * packet processing.  Invoked with tp->lock held.
5577  */
5578 static int tg3_init_hw(struct tg3 *tp)
5579 {
5580         int err;
5581
5582         /* Force the chip into D0. */
5583         err = tg3_set_power_state(tp, 0);
5584         if (err)
5585                 goto out;
5586
5587         tg3_switch_clocks(tp);
5588
5589         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5590
5591         err = tg3_reset_hw(tp);
5592
5593 out:
5594         return err;
5595 }
5596
5597 #define TG3_STAT_ADD32(PSTAT, REG) \
5598 do {    u32 __val = tr32(REG); \
5599         (PSTAT)->low += __val; \
5600         if ((PSTAT)->low < __val) \
5601                 (PSTAT)->high += 1; \
5602 } while (0)
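     /* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit
      * low/high software counter, using the unsigned wrap of ->low to
      * carry into ->high.  The counter registers presumably clear on
      * read, since each sample is added to the running total rather
      * than replacing it.
      */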
5603
5604 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5605 {
5606         struct tg3_hw_stats *sp = tp->hw_stats;
5607
5608         if (!netif_carrier_ok(tp->dev))
5609                 return;
5610
5611         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5612         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5613         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5614         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5615         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5616         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5617         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5618         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5619         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5620         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5621         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5622         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5623         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5624
5625         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5626         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5627         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5628         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5629         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5630         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5631         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5632         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5633         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5634         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5635         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5636         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5637         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5638         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5639 }
5640
5641 static void tg3_timer(unsigned long __opaque)
5642 {
5643         struct tg3 *tp = (struct tg3 *) __opaque;
5644         unsigned long flags;
5645
5646         spin_lock_irqsave(&tp->lock, flags);
5647         spin_lock(&tp->tx_lock);
5648
5649         /* All of this garbage is because, when using non-tagged
5650          * IRQ status, the mailbox/status_block protocol the chip
5651          * uses with the CPU is race prone.
5652          */
5653         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5654                 tw32(GRC_LOCAL_CTRL,
5655                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5656         } else {
5657                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5658                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5659         }
5660
5661         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5662                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5663                 spin_unlock(&tp->tx_lock);
5664                 spin_unlock_irqrestore(&tp->lock, flags);
5665                 schedule_work(&tp->reset_task);
5666                 return;
5667         }
5668
5669         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5670                 tg3_periodic_fetch_stats(tp);
5671
5672         /* This part only runs once per second. */
5673         if (!--tp->timer_counter) {
5674                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5675                         u32 mac_stat;
5676                         int phy_event;
5677
5678                         mac_stat = tr32(MAC_STATUS);
5679
5680                         phy_event = 0;
5681                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5682                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5683                                         phy_event = 1;
5684                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5685                                 phy_event = 1;
5686
5687                         if (phy_event)
5688                                 tg3_setup_phy(tp, 0);
5689                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5690                         u32 mac_stat = tr32(MAC_STATUS);
5691                         int need_setup = 0;
5692
5693                         if (netif_carrier_ok(tp->dev) &&
5694                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5695                                 need_setup = 1;
5696                         }
5697                         if (!netif_carrier_ok(tp->dev) &&
5698                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5699                                          MAC_STATUS_SIGNAL_DET))) {
5700                                 need_setup = 1;
5701                         }
5702                         if (need_setup) {
5703                                 tw32_f(MAC_MODE,
5704                                      (tp->mac_mode &
5705                                       ~MAC_MODE_PORT_MODE_MASK));
5706                                 udelay(40);
5707                                 tw32_f(MAC_MODE, tp->mac_mode);
5708                                 udelay(40);
5709                                 tg3_setup_phy(tp, 0);
5710                         }
5711                 }
5712
5713                 tp->timer_counter = tp->timer_multiplier;
5714         }
5715
5716         /* Heartbeat is only sent once every 120 seconds.  */
5717         if (!--tp->asf_counter) {
5718                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5719                         u32 val;
5720
5721                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5722                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5723                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5724                         val = tr32(GRC_RX_CPU_EVENT);
5725                         val |= (1 << 14);
5726                         tw32(GRC_RX_CPU_EVENT, val);
5727                 }
5728                 tp->asf_counter = tp->asf_multiplier;
5729         }
5730
5731         spin_unlock(&tp->tx_lock);
5732         spin_unlock_irqrestore(&tp->lock, flags);
5733
5734         tp->timer.expires = jiffies + tp->timer_offset;
5735         add_timer(&tp->timer);
5736 }
5737
5738 static int tg3_open(struct net_device *dev)
5739 {
5740         struct tg3 *tp = netdev_priv(dev);
5741         int err;
5742
5743         spin_lock_irq(&tp->lock);
5744         spin_lock(&tp->tx_lock);
5745
5746         tg3_disable_ints(tp);
5747         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5748
5749         spin_unlock(&tp->tx_lock);
5750         spin_unlock_irq(&tp->lock);
5751
5752         /* The placement of this call is tied
5753          * to the setup and use of Host TX descriptors.
5754          */
5755         err = tg3_alloc_consistent(tp);
5756         if (err)
5757                 return err;
5758
5759         err = request_irq(dev->irq, tg3_interrupt,
5760                           SA_SHIRQ, dev->name, dev);
5761
5762         if (err) {
5763                 tg3_free_consistent(tp);
5764                 return err;
5765         }
5766
5767         spin_lock_irq(&tp->lock);
5768         spin_lock(&tp->tx_lock);
5769
5770         err = tg3_init_hw(tp);
5771         if (err) {
5772                 tg3_halt(tp);
5773                 tg3_free_rings(tp);
5774         } else {
5775                 tp->timer_offset = HZ / 10;
5776                 tp->timer_counter = tp->timer_multiplier = 10;
5777                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
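                     /* The timer fires every HZ / 10 jiffies (~100 ms), so
                      * timer_counter = 10 gives the once-per-second link
                      * check in tg3_timer() and asf_counter = 10 * 120
                      * gives the 120 second ASF heartbeat.
                      */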
5778
5779                 init_timer(&tp->timer);
5780                 tp->timer.expires = jiffies + tp->timer_offset;
5781                 tp->timer.data = (unsigned long) tp;
5782                 tp->timer.function = tg3_timer;
5783                 add_timer(&tp->timer);
5784
5785                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5786         }
5787
5788         spin_unlock(&tp->tx_lock);
5789         spin_unlock_irq(&tp->lock);
5790
5791         if (err) {
5792                 free_irq(dev->irq, dev);
5793                 tg3_free_consistent(tp);
5794                 return err;
5795         }
5796
5797         spin_lock_irq(&tp->lock);
5798         spin_lock(&tp->tx_lock);
5799
5800         tg3_enable_ints(tp);
5801
5802         spin_unlock(&tp->tx_lock);
5803         spin_unlock_irq(&tp->lock);
5804
5805         netif_start_queue(dev);
5806
5807         return 0;
5808 }
5809
5810 #if 0
5811 /*static*/ void tg3_dump_state(struct tg3 *tp)
5812 {
5813         u32 val32, val32_2, val32_3, val32_4, val32_5;
5814         u16 val16;
5815         int i;
5816
5817         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5818         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5819         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5820                val16, val32);
5821
5822         /* MAC block */
5823         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5824                tr32(MAC_MODE), tr32(MAC_STATUS));
5825         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5826                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5827         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5828                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5829         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5830                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5831
5832         /* Send data initiator control block */
5833         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5834                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5835         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5836                tr32(SNDDATAI_STATSCTRL));
5837
5838         /* Send data completion control block */
5839         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5840
5841         /* Send BD ring selector block */
5842         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5843                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5844
5845         /* Send BD initiator control block */
5846         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5847                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5848
5849         /* Send BD completion control block */
5850         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5851
5852         /* Receive list placement control block */
5853         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5854                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5855         printk("       RCVLPC_STATSCTRL[%08x]\n",
5856                tr32(RCVLPC_STATSCTRL));
5857
5858         /* Receive data and receive BD initiator control block */
5859         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5860                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5861
5862         /* Receive data completion control block */
5863         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5864                tr32(RCVDCC_MODE));
5865
5866         /* Receive BD initiator control block */
5867         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5868                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5869
5870         /* Receive BD completion control block */
5871         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5872                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5873
5874         /* Receive list selector control block */
5875         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5876                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5877
5878         /* Mbuf cluster free block */
5879         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5880                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5881
5882         /* Host coalescing control block */
5883         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5884                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5885         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5886                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5887                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5888         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5889                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5890                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5891         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5892                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5893         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5894                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5895
5896         /* Memory arbiter control block */
5897         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5898                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5899
5900         /* Buffer manager control block */
5901         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5902                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5903         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5904                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5905         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5906                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5907                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5908                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5909
5910         /* Read DMA control block */
5911         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5912                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5913
5914         /* Write DMA control block */
5915         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5916                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5917
5918         /* DMA completion block */
5919         printk("DEBUG: DMAC_MODE[%08x]\n",
5920                tr32(DMAC_MODE));
5921
5922         /* GRC block */
5923         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5924                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5925         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5926                tr32(GRC_LOCAL_CTRL));
5927
5928         /* TG3_BDINFOs */
5929         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5930                tr32(RCVDBDI_JUMBO_BD + 0x0),
5931                tr32(RCVDBDI_JUMBO_BD + 0x4),
5932                tr32(RCVDBDI_JUMBO_BD + 0x8),
5933                tr32(RCVDBDI_JUMBO_BD + 0xc));
5934         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5935                tr32(RCVDBDI_STD_BD + 0x0),
5936                tr32(RCVDBDI_STD_BD + 0x4),
5937                tr32(RCVDBDI_STD_BD + 0x8),
5938                tr32(RCVDBDI_STD_BD + 0xc));
5939         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5940                tr32(RCVDBDI_MINI_BD + 0x0),
5941                tr32(RCVDBDI_MINI_BD + 0x4),
5942                tr32(RCVDBDI_MINI_BD + 0x8),
5943                tr32(RCVDBDI_MINI_BD + 0xc));
5944
5945         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5946         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5947         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5948         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5949         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5950                val32, val32_2, val32_3, val32_4);
5951
5952         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5953         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5954         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5955         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5956         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5957                val32, val32_2, val32_3, val32_4);
5958
5959         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5960         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5961         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5962         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5963         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5964         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5965                val32, val32_2, val32_3, val32_4, val32_5);
5966
5967         /* SW status block */
5968         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5969                tp->hw_status->status,
5970                tp->hw_status->status_tag,
5971                tp->hw_status->rx_jumbo_consumer,
5972                tp->hw_status->rx_consumer,
5973                tp->hw_status->rx_mini_consumer,
5974                tp->hw_status->idx[0].rx_producer,
5975                tp->hw_status->idx[0].tx_consumer);
5976
5977         /* SW statistics block */
5978         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5979                ((u32 *)tp->hw_stats)[0],
5980                ((u32 *)tp->hw_stats)[1],
5981                ((u32 *)tp->hw_stats)[2],
5982                ((u32 *)tp->hw_stats)[3]);
5983
5984         /* Mailboxes */
5985         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5986                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5987                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5988                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5989                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5990
5991         /* NIC side send descriptors. */
5992         for (i = 0; i < 6; i++) {
5993                 unsigned long txd;
5994
5995                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5996                         + (i * sizeof(struct tg3_tx_buffer_desc));
5997                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5998                        i,
5999                        readl(txd + 0x0), readl(txd + 0x4),
6000                        readl(txd + 0x8), readl(txd + 0xc));
6001         }
6002
6003         /* NIC side RX descriptors. */
6004         for (i = 0; i < 6; i++) {
6005                 unsigned long rxd;
6006
6007                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6008                         + (i * sizeof(struct tg3_rx_buffer_desc));
6009                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6010                        i,
6011                        readl(rxd + 0x0), readl(rxd + 0x4),
6012                        readl(rxd + 0x8), readl(rxd + 0xc));
6013                 rxd += (4 * sizeof(u32));
6014                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6015                        i,
6016                        readl(rxd + 0x0), readl(rxd + 0x4),
6017                        readl(rxd + 0x8), readl(rxd + 0xc));
6018         }
6019
6020         for (i = 0; i < 6; i++) {
6021                 unsigned long rxd;
6022
6023                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6024                         + (i * sizeof(struct tg3_rx_buffer_desc));
6025                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6026                        i,
6027                        readl(rxd + 0x0), readl(rxd + 0x4),
6028                        readl(rxd + 0x8), readl(rxd + 0xc));
6029                 rxd += (4 * sizeof(u32));
6030                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6031                        i,
6032                        readl(rxd + 0x0), readl(rxd + 0x4),
6033                        readl(rxd + 0x8), readl(rxd + 0xc));
6034         }
6035 }
6036 #endif
6037
6038 static struct net_device_stats *tg3_get_stats(struct net_device *);
6039 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6040
6041 static int tg3_close(struct net_device *dev)
6042 {
6043         struct tg3 *tp = netdev_priv(dev);
6044
6045         netif_stop_queue(dev);
6046
6047         del_timer_sync(&tp->timer);
6048
6049         spin_lock_irq(&tp->lock);
6050         spin_lock(&tp->tx_lock);
6051 #if 0
6052         tg3_dump_state(tp);
6053 #endif
6054
6055         tg3_disable_ints(tp);
6056
6057         tg3_halt(tp);
6058         tg3_free_rings(tp);
6059         tp->tg3_flags &=
6060                 ~(TG3_FLAG_INIT_COMPLETE |
6061                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6062         netif_carrier_off(tp->dev);
6063
6064         spin_unlock(&tp->tx_lock);
6065         spin_unlock_irq(&tp->lock);
6066
6067         free_irq(dev->irq, dev);
6068
6069         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6070                sizeof(tp->net_stats_prev));
6071         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6072                sizeof(tp->estats_prev));
6073
6074         tg3_free_consistent(tp);
6075
6076         return 0;
6077 }
6078
6079 static inline unsigned long get_stat64(tg3_stat64_t *val)
6080 {
6081         unsigned long ret;
6082
6083 #if (BITS_PER_LONG == 32)
6084         ret = val->low;
6085 #else
6086         ret = ((u64)val->high << 32) | ((u64)val->low);
6087 #endif
6088         return ret;
6089 }
6090
6091 static unsigned long calc_crc_errors(struct tg3 *tp)
6092 {
6093         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6094
6095         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6096             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6097              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6098                 unsigned long flags;
6099                 u32 val;
6100
6101                 spin_lock_irqsave(&tp->lock, flags);
6102                 if (!tg3_readphy(tp, 0x1e, &val)) {
6103                         tg3_writephy(tp, 0x1e, val | 0x8000);
6104                         tg3_readphy(tp, 0x14, &val);
6105                 } else
6106                         val = 0;
6107                 spin_unlock_irqrestore(&tp->lock, flags);
6108
6109                 tp->phy_crc_errors += val;
6110
6111                 return tp->phy_crc_errors;
6112         }
6113
6114         return get_stat64(&hw_stats->rx_fcs_errors);
6115 }
6116
6117 #define ESTAT_ADD(member) \
6118         estats->member =        old_estats->member + \
6119                                 get_stat64(&hw_stats->member)
6120
6121 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6122 {
6123         struct tg3_ethtool_stats *estats = &tp->estats;
6124         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6125         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6126
6127         if (!hw_stats)
6128                 return old_estats;
6129
6130         ESTAT_ADD(rx_octets);
6131         ESTAT_ADD(rx_fragments);
6132         ESTAT_ADD(rx_ucast_packets);
6133         ESTAT_ADD(rx_mcast_packets);
6134         ESTAT_ADD(rx_bcast_packets);
6135         ESTAT_ADD(rx_fcs_errors);
6136         ESTAT_ADD(rx_align_errors);
6137         ESTAT_ADD(rx_xon_pause_rcvd);
6138         ESTAT_ADD(rx_xoff_pause_rcvd);
6139         ESTAT_ADD(rx_mac_ctrl_rcvd);
6140         ESTAT_ADD(rx_xoff_entered);
6141         ESTAT_ADD(rx_frame_too_long_errors);
6142         ESTAT_ADD(rx_jabbers);
6143         ESTAT_ADD(rx_undersize_packets);
6144         ESTAT_ADD(rx_in_length_errors);
6145         ESTAT_ADD(rx_out_length_errors);
6146         ESTAT_ADD(rx_64_or_less_octet_packets);
6147         ESTAT_ADD(rx_65_to_127_octet_packets);
6148         ESTAT_ADD(rx_128_to_255_octet_packets);
6149         ESTAT_ADD(rx_256_to_511_octet_packets);
6150         ESTAT_ADD(rx_512_to_1023_octet_packets);
6151         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6152         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6153         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6154         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6155         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6156
6157         ESTAT_ADD(tx_octets);
6158         ESTAT_ADD(tx_collisions);
6159         ESTAT_ADD(tx_xon_sent);
6160         ESTAT_ADD(tx_xoff_sent);
6161         ESTAT_ADD(tx_flow_control);
6162         ESTAT_ADD(tx_mac_errors);
6163         ESTAT_ADD(tx_single_collisions);
6164         ESTAT_ADD(tx_mult_collisions);
6165         ESTAT_ADD(tx_deferred);
6166         ESTAT_ADD(tx_excessive_collisions);
6167         ESTAT_ADD(tx_late_collisions);
6168         ESTAT_ADD(tx_collide_2times);
6169         ESTAT_ADD(tx_collide_3times);
6170         ESTAT_ADD(tx_collide_4times);
6171         ESTAT_ADD(tx_collide_5times);
6172         ESTAT_ADD(tx_collide_6times);
6173         ESTAT_ADD(tx_collide_7times);
6174         ESTAT_ADD(tx_collide_8times);
6175         ESTAT_ADD(tx_collide_9times);
6176         ESTAT_ADD(tx_collide_10times);
6177         ESTAT_ADD(tx_collide_11times);
6178         ESTAT_ADD(tx_collide_12times);
6179         ESTAT_ADD(tx_collide_13times);
6180         ESTAT_ADD(tx_collide_14times);
6181         ESTAT_ADD(tx_collide_15times);
6182         ESTAT_ADD(tx_ucast_packets);
6183         ESTAT_ADD(tx_mcast_packets);
6184         ESTAT_ADD(tx_bcast_packets);
6185         ESTAT_ADD(tx_carrier_sense_errors);
6186         ESTAT_ADD(tx_discards);
6187         ESTAT_ADD(tx_errors);
6188
6189         ESTAT_ADD(dma_writeq_full);
6190         ESTAT_ADD(dma_write_prioq_full);
6191         ESTAT_ADD(rxbds_empty);
6192         ESTAT_ADD(rx_discards);
6193         ESTAT_ADD(rx_errors);
6194         ESTAT_ADD(rx_threshold_hit);
6195
6196         ESTAT_ADD(dma_readq_full);
6197         ESTAT_ADD(dma_read_prioq_full);
6198         ESTAT_ADD(tx_comp_queue_full);
6199
6200         ESTAT_ADD(ring_set_send_prod_index);
6201         ESTAT_ADD(ring_status_update);
6202         ESTAT_ADD(nic_irqs);
6203         ESTAT_ADD(nic_avoided_irqs);
6204         ESTAT_ADD(nic_tx_threshold_hit);
6205
6206         return estats;
6207 }
6208
6209 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6210 {
6211         struct tg3 *tp = netdev_priv(dev);
6212         struct net_device_stats *stats = &tp->net_stats;
6213         struct net_device_stats *old_stats = &tp->net_stats_prev;
6214         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6215
6216         if (!hw_stats)
6217                 return old_stats;
6218
6219         stats->rx_packets = old_stats->rx_packets +
6220                 get_stat64(&hw_stats->rx_ucast_packets) +
6221                 get_stat64(&hw_stats->rx_mcast_packets) +
6222                 get_stat64(&hw_stats->rx_bcast_packets);
6223
6224         stats->tx_packets = old_stats->tx_packets +
6225                 get_stat64(&hw_stats->tx_ucast_packets) +
6226                 get_stat64(&hw_stats->tx_mcast_packets) +
6227                 get_stat64(&hw_stats->tx_bcast_packets);
6228
6229         stats->rx_bytes = old_stats->rx_bytes +
6230                 get_stat64(&hw_stats->rx_octets);
6231         stats->tx_bytes = old_stats->tx_bytes +
6232                 get_stat64(&hw_stats->tx_octets);
6233
6234         stats->rx_errors = old_stats->rx_errors +
6235                 get_stat64(&hw_stats->rx_errors) +
6236                 get_stat64(&hw_stats->rx_discards);
6237         stats->tx_errors = old_stats->tx_errors +
6238                 get_stat64(&hw_stats->tx_errors) +
6239                 get_stat64(&hw_stats->tx_mac_errors) +
6240                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6241                 get_stat64(&hw_stats->tx_discards);
6242
6243         stats->multicast = old_stats->multicast +
6244                 get_stat64(&hw_stats->rx_mcast_packets);
6245         stats->collisions = old_stats->collisions +
6246                 get_stat64(&hw_stats->tx_collisions);
6247
6248         stats->rx_length_errors = old_stats->rx_length_errors +
6249                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6250                 get_stat64(&hw_stats->rx_undersize_packets);
6251
6252         stats->rx_over_errors = old_stats->rx_over_errors +
6253                 get_stat64(&hw_stats->rxbds_empty);
6254         stats->rx_frame_errors = old_stats->rx_frame_errors +
6255                 get_stat64(&hw_stats->rx_align_errors);
6256         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6257                 get_stat64(&hw_stats->tx_discards);
6258         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6259                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6260
6261         stats->rx_crc_errors = old_stats->rx_crc_errors +
6262                 calc_crc_errors(tp);
6263
6264         return stats;
6265 }
6266
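     /* calc_crc() below is the standard bit-reflected CRC-32 with
      * polynomial 0xedb88320, i.e. the same CRC the Ethernet FCS uses;
      * the multicast hash filter in __tg3_set_rx_mode() is derived
      * from it.
      */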
6267 static inline u32 calc_crc(unsigned char *buf, int len)
6268 {
6269         u32 reg;
6270         u32 tmp;
6271         int j, k;
6272
6273         reg = 0xffffffff;
6274
6275         for (j = 0; j < len; j++) {
6276                 reg ^= buf[j];
6277
6278                 for (k = 0; k < 8; k++) {
6279                         tmp = reg & 0x01;
6280
6281                         reg >>= 1;
6282
6283                         if (tmp) {
6284                                 reg ^= 0xedb88320;
6285                         }
6286                 }
6287         }
6288
6289         return ~reg;
6290 }
6291
6292 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6293 {
6294         /* accept or reject all multicast frames */
6295         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6296         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6297         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6298         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6299 }
6300
6301 static void __tg3_set_rx_mode(struct net_device *dev)
6302 {
6303         struct tg3 *tp = netdev_priv(dev);
6304         u32 rx_mode;
6305
6306         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6307                                   RX_MODE_KEEP_VLAN_TAG);
6308
6309         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6310          * flag clear.
6311          */
6312 #if TG3_VLAN_TAG_USED
6313         if (!tp->vlgrp &&
6314             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6315                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6316 #else
6317         /* By definition, VLAN is always disabled in this
6318          * case.
6319          */
6320         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6321                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6322 #endif
6323
6324         if (dev->flags & IFF_PROMISC) {
6325                 /* Promiscuous mode. */
6326                 rx_mode |= RX_MODE_PROMISC;
6327         } else if (dev->flags & IFF_ALLMULTI) {
6328                 /* Accept all multicast. */
6329                 tg3_set_multi (tp, 1);
6330         } else if (dev->mc_count < 1) {
6331                 /* Reject all multicast. */
6332                 tg3_set_multi (tp, 0);
6333         } else {
6334                 /* Accept one or more multicast(s). */
6335                 struct dev_mc_list *mclist;
6336                 unsigned int i;
6337                 u32 mc_filter[4] = { 0, };
6338                 u32 regidx;
6339                 u32 bit;
6340                 u32 crc;
6341
6342                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6343                      i++, mclist = mclist->next) {
6344
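                             /* Hash on the low 7 bits of the inverted CRC:
                              * bits 6:5 select one of the four MAC_HASH
                              * registers, bits 4:0 the bit within it.
                              */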
6345                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6346                         bit = ~crc & 0x7f;
6347                         regidx = (bit & 0x60) >> 5;
6348                         bit &= 0x1f;
6349                         mc_filter[regidx] |= (1 << bit);
6350                 }
6351
6352                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6353                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6354                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6355                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6356         }
6357
6358         if (rx_mode != tp->rx_mode) {
6359                 tp->rx_mode = rx_mode;
6360                 tw32_f(MAC_RX_MODE, rx_mode);
6361                 udelay(10);
6362         }
6363 }
6364
6365 static void tg3_set_rx_mode(struct net_device *dev)
6366 {
6367         struct tg3 *tp = netdev_priv(dev);
6368
6369         spin_lock_irq(&tp->lock);
6370         spin_lock(&tp->tx_lock);
6371         __tg3_set_rx_mode(dev);
6372         spin_unlock(&tp->tx_lock);
6373         spin_unlock_irq(&tp->lock);
6374 }
6375
6376 #define TG3_REGDUMP_LEN         (32 * 1024)
6377
6378 static int tg3_get_regs_len(struct net_device *dev)
6379 {
6380         return TG3_REGDUMP_LEN;
6381 }
6382
6383 static void tg3_get_regs(struct net_device *dev,
6384                 struct ethtool_regs *regs, void *_p)
6385 {
6386         u32 *p = _p;
6387         struct tg3 *tp = netdev_priv(dev);
6388         u8 *orig_p = _p;
6389         int i;
6390
6391         regs->version = 0;
6392
6393         memset(p, 0, TG3_REGDUMP_LEN);
6394
6395         spin_lock_irq(&tp->lock);
6396         spin_lock(&tp->tx_lock);
6397
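     /* __GET_REG32() stores one register at the current dump position,
      * GET_REG32_LOOP() dumps `len' bytes of registers starting at `base',
      * and GET_REG32_1() dumps a single register; each value lands at its
      * register offset within the 32k snapshot buffer.
      */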
6398 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6399 #define GET_REG32_LOOP(base,len)                \
6400 do {    p = (u32 *)(orig_p + (base));           \
6401         for (i = 0; i < len; i += 4)            \
6402                 __GET_REG32((base) + i);        \
6403 } while (0)
6404 #define GET_REG32_1(reg)                        \
6405 do {    p = (u32 *)(orig_p + (reg));            \
6406         __GET_REG32((reg));                     \
6407 } while (0)
6408
6409         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6410         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6411         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6412         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6413         GET_REG32_1(SNDDATAC_MODE);
6414         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6415         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6416         GET_REG32_1(SNDBDC_MODE);
6417         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6418         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6419         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6420         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6421         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6422         GET_REG32_1(RCVDCC_MODE);
6423         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6424         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6425         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6426         GET_REG32_1(MBFREE_MODE);
6427         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6428         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6429         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6430         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6431         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6432         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6433         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6434         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6435         GET_REG32_LOOP(FTQ_RESET, 0x120);
6436         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6437         GET_REG32_1(DMAC_MODE);
6438         GET_REG32_LOOP(GRC_MODE, 0x4c);
6439         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6440                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6441
6442 #undef __GET_REG32
6443 #undef GET_REG32_LOOP
6444 #undef GET_REG32_1
6445
6446         spin_unlock(&tp->tx_lock);
6447         spin_unlock_irq(&tp->lock);
6448 }
6449
6450 static int tg3_get_eeprom_len(struct net_device *dev)
6451 {
6452         struct tg3 *tp = netdev_priv(dev);
6453
6454         return tp->nvram_size;
6455 }
6456
6457 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6458
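     /* ethtool EEPROM read: copy an unaligned leading fragment, then whole
      * 32-bit words, then a trailing fragment, all via tg3_nvram_read().
      */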
6459 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6460 {
6461         struct tg3 *tp = netdev_priv(dev);
6462         int ret;
6463         u8  *pd;
6464         u32 i, offset, len, val, b_offset, b_count;
6465
6466         offset = eeprom->offset;
6467         len = eeprom->len;
6468         eeprom->len = 0;
6469
6470         eeprom->magic = TG3_EEPROM_MAGIC;
6471
6472         if (offset & 3) {
6473                 /* adjustments to start on required 4 byte boundary */
6474                 b_offset = offset & 3;
6475                 b_count = 4 - b_offset;
6476                 if (b_count > len) {
6477                         /* i.e. offset=1 len=2 */
6478                         b_count = len;
6479                 }
6480                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6481                 if (ret)
6482                         return ret;
6483                 val = cpu_to_le32(val);
6484                 memcpy(data, ((char*)&val) + b_offset, b_count);
6485                 len -= b_count;
6486                 offset += b_count;
6487                 eeprom->len += b_count;
6488         }
6489
6490         /* read bytes up to the last 4-byte boundary */
6491         pd = &data[eeprom->len];
6492         for (i = 0; i < (len - (len & 3)); i += 4) {
6493                 ret = tg3_nvram_read(tp, offset + i, &val);
6494                 if (ret) {
6495                         eeprom->len += i;
6496                         return ret;
6497                 }
6498                 val = cpu_to_le32(val);
6499                 memcpy(pd + i, &val, 4);
6500         }
6501         eeprom->len += i;
6502
6503         if (len & 3) {
6504                 /* read trailing bytes that do not end on a 4-byte boundary */
6505                 pd = &data[eeprom->len];
6506                 b_count = len & 3;
6507                 b_offset = offset + len - b_count;
6508                 ret = tg3_nvram_read(tp, b_offset, &val);
6509                 if (ret)
6510                         return ret;
6511                 val = cpu_to_le32(val);
6512                 memcpy(pd, ((char*)&val), b_count);
6513                 eeprom->len += b_count;
6514         }
6515         return 0;
6516 }
6517
6518 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6519
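     /* ethtool EEPROM write: round the request out to 32-bit alignment at
      * both ends, preserving the neighbouring bytes read back from NVRAM,
      * then hand the whole buffer to tg3_nvram_write_block().
      */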
6520 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6521 {
6522         struct tg3 *tp = netdev_priv(dev);
6523         int ret;
6524         u32 offset, len, b_offset, odd_len, start, end;
6525         u8 *buf;
6526
6527         if (eeprom->magic != TG3_EEPROM_MAGIC)
6528                 return -EINVAL;
6529
6530         offset = eeprom->offset;
6531         len = eeprom->len;
6532
6533         if ((b_offset = (offset & 3))) {
6534                 /* adjustments to start on required 4 byte boundary */
6535                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6536                 if (ret)
6537                         return ret;
6538                 start = cpu_to_le32(start);
6539                 len += b_offset;
6540                 offset &= ~3;
6541         }
6542
6543         odd_len = 0;
6544         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6545                 /* adjustments to end on required 4 byte boundary */
6546                 odd_len = 1;
6547                 len = (len + 3) & ~3;
6548                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6549                 if (ret)
6550                         return ret;
6551                 end = cpu_to_le32(end);
6552         }
6553
6554         buf = data;
6555         if (b_offset || odd_len) {
6556                 buf = kmalloc(len, GFP_KERNEL);
6557                 if (buf == 0)
6558                         return -ENOMEM;
6559                 if (b_offset)
6560                         memcpy(buf, &start, 4);
6561                 if (odd_len)
6562                         memcpy(buf+len-4, &end, 4);
6563                 memcpy(buf + b_offset, data, eeprom->len);
6564         }
6565
6566         ret = tg3_nvram_write_block(tp, offset, len, buf);
6567
6568         if (buf != data)
6569                 kfree(buf);
6570
6571         return ret;
6572 }
6573
6574 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6575 {
6576         struct tg3 *tp = netdev_priv(dev);
6577   
6578         cmd->supported = (SUPPORTED_Autoneg);
6579
6580         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6581                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6582                                    SUPPORTED_1000baseT_Full);
6583
6584         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6585                 cmd->supported |= (SUPPORTED_100baseT_Half |
6586                                   SUPPORTED_100baseT_Full |
6587                                   SUPPORTED_10baseT_Half |
6588                                   SUPPORTED_10baseT_Full |
6589                                   SUPPORTED_MII);
6590         else
6591                 cmd->supported |= SUPPORTED_FIBRE;
6592   
6593         cmd->advertising = tp->link_config.advertising;
6594         if (netif_running(dev)) {
6595                 cmd->speed = tp->link_config.active_speed;
6596                 cmd->duplex = tp->link_config.active_duplex;
6597         }
6598         cmd->port = 0;
6599         cmd->phy_address = PHY_ADDR;
6600         cmd->transceiver = 0;
6601         cmd->autoneg = tp->link_config.autoneg;
6602         cmd->maxtxpkt = 0;
6603         cmd->maxrxpkt = 0;
6604         return 0;
6605 }
6606   
6607 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6608 {
6609         struct tg3 *tp = netdev_priv(dev);
6610   
6611         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6612                 /* These are the only advertisement bits allowed.  */
6613                 if (cmd->autoneg == AUTONEG_ENABLE &&
6614                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6615                                           ADVERTISED_1000baseT_Full |
6616                                           ADVERTISED_Autoneg |
6617                                           ADVERTISED_FIBRE)))
6618                         return -EINVAL;
6619         }
6620
6621         spin_lock_irq(&tp->lock);
6622         spin_lock(&tp->tx_lock);
6623
6624         tp->link_config.autoneg = cmd->autoneg;
6625         if (cmd->autoneg == AUTONEG_ENABLE) {
6626                 tp->link_config.advertising = cmd->advertising;
6627                 tp->link_config.speed = SPEED_INVALID;
6628                 tp->link_config.duplex = DUPLEX_INVALID;
6629         } else {
6630                 tp->link_config.advertising = 0;
6631                 tp->link_config.speed = cmd->speed;
6632                 tp->link_config.duplex = cmd->duplex;
6633         }
6634   
6635         if (netif_running(dev))
6636                 tg3_setup_phy(tp, 1);
6637
6638         spin_unlock(&tp->tx_lock);
6639         spin_unlock_irq(&tp->lock);
6640   
6641         return 0;
6642 }
6643   
6644 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6645 {
6646         struct tg3 *tp = netdev_priv(dev);
6647   
6648         strcpy(info->driver, DRV_MODULE_NAME);
6649         strcpy(info->version, DRV_MODULE_VERSION);
6650         strcpy(info->bus_info, pci_name(tp->pdev));
6651 }
6652   
6653 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6654 {
6655         struct tg3 *tp = netdev_priv(dev);
6656   
6657         wol->supported = WAKE_MAGIC;
6658         wol->wolopts = 0;
6659         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6660                 wol->wolopts = WAKE_MAGIC;
6661         memset(&wol->sopass, 0, sizeof(wol->sopass));
6662 }
6663   
6664 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6665 {
6666         struct tg3 *tp = netdev_priv(dev);
6667   
6668         if (wol->wolopts & ~WAKE_MAGIC)
6669                 return -EINVAL;
6670         if ((wol->wolopts & WAKE_MAGIC) &&
6671             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6672             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6673                 return -EINVAL;
6674   
6675         spin_lock_irq(&tp->lock);
6676         if (wol->wolopts & WAKE_MAGIC)
6677                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6678         else
6679                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6680         spin_unlock_irq(&tp->lock);
6681   
6682         return 0;
6683 }
6684   
6685 static u32 tg3_get_msglevel(struct net_device *dev)
6686 {
6687         struct tg3 *tp = netdev_priv(dev);
6688         return tp->msg_enable;
6689 }
6690   
6691 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6692 {
6693         struct tg3 *tp = netdev_priv(dev);
6694         tp->msg_enable = value;
6695 }
6696   
6697 #if TG3_TSO_SUPPORT != 0
6698 static int tg3_set_tso(struct net_device *dev, u32 value)
6699 {
6700         struct tg3 *tp = netdev_priv(dev);
6701
6702         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6703                 if (value)
6704                         return -EINVAL;
6705                 return 0;
6706         }
6707         return ethtool_op_set_tso(dev, value);
6708 }
6709 #endif
6710   
6711 static int tg3_nway_reset(struct net_device *dev)
6712 {
6713         struct tg3 *tp = netdev_priv(dev);
6714         u32 bmcr;
6715         int r;
6716   
6717         if (!netif_running(dev))
6718                 return -EAGAIN;
6719
6720         spin_lock_irq(&tp->lock);
6721         r = -EINVAL;
6722         tg3_readphy(tp, MII_BMCR, &bmcr);
6723         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6724             (bmcr & BMCR_ANENABLE)) {
6725                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6726                 r = 0;
6727         }
6728         spin_unlock_irq(&tp->lock);
6729   
6730         return r;
6731 }
6732   
6733 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6734 {
6735         struct tg3 *tp = netdev_priv(dev);
6736   
6737         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6738         ering->rx_mini_max_pending = 0;
6739         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6740
6741         ering->rx_pending = tp->rx_pending;
6742         ering->rx_mini_pending = 0;
6743         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6744         ering->tx_pending = tp->tx_pending;
6745 }
6746   
6747 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6748 {
6749         struct tg3 *tp = netdev_priv(dev);
6750   
6751         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6752             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6753             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6754                 return -EINVAL;
6755   
6756         if (netif_running(dev))
6757                 tg3_netif_stop(tp);
6758
6759         spin_lock_irq(&tp->lock);
6760         spin_lock(&tp->tx_lock);
6761   
6762         tp->rx_pending = ering->rx_pending;
6763
6764         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6765             tp->rx_pending > 63)
6766                 tp->rx_pending = 63;
6767         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6768         tp->tx_pending = ering->tx_pending;
6769
6770         if (netif_running(dev)) {
6771                 tg3_halt(tp);
6772                 tg3_init_hw(tp);
6773                 tg3_netif_start(tp);
6774         }
6775
6776         spin_unlock(&tp->tx_lock);
6777         spin_unlock_irq(&tp->lock);
6778   
6779         return 0;
6780 }
6781   
6782 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6783 {
6784         struct tg3 *tp = netdev_priv(dev);
6785   
6786         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6787         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6788         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6789 }
6790   
6791 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6792 {
6793         struct tg3 *tp = netdev_priv(dev);
6794   
6795         if (netif_running(dev))
6796                 tg3_netif_stop(tp);
6797
6798         spin_lock_irq(&tp->lock);
6799         spin_lock(&tp->tx_lock);
6800         if (epause->autoneg)
6801                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6802         else
6803                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6804         if (epause->rx_pause)
6805                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6806         else
6807                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6808         if (epause->tx_pause)
6809                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6810         else
6811                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6812
6813         if (netif_running(dev)) {
6814                 tg3_halt(tp);
6815                 tg3_init_hw(tp);
6816                 tg3_netif_start(tp);
6817         }
6818         spin_unlock(&tp->tx_lock);
6819         spin_unlock_irq(&tp->lock);
6820   
6821         return 0;
6822 }
6823   
6824 static u32 tg3_get_rx_csum(struct net_device *dev)
6825 {
6826         struct tg3 *tp = netdev_priv(dev);
6827         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6828 }
6829   
6830 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6831 {
6832         struct tg3 *tp = netdev_priv(dev);
6833   
6834         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6835                 if (data != 0)
6836                         return -EINVAL;
6837                 return 0;
6838         }
6839   
6840         spin_lock_irq(&tp->lock);
6841         if (data)
6842                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6843         else
6844                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6845         spin_unlock_irq(&tp->lock);
6846   
6847         return 0;
6848 }
6849   
6850 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6851 {
6852         struct tg3 *tp = netdev_priv(dev);
6853   
6854         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6855                 if (data != 0)
6856                         return -EINVAL;
6857                 return 0;
6858         }
6859   
6860         if (data)
6861                 dev->features |= NETIF_F_IP_CSUM;
6862         else
6863                 dev->features &= ~NETIF_F_IP_CSUM;
6864
6865         return 0;
6866 }
6867
6868 static int tg3_get_stats_count (struct net_device *dev)
6869 {
6870         return TG3_NUM_STATS;
6871 }
6872
6873 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6874 {
6875         switch (stringset) {
6876         case ETH_SS_STATS:
6877                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6878                 break;
6879         default:
6880                 WARN_ON(1);     /* we need a WARN() */
6881                 break;
6882         }
6883 }
6884
6885 static void tg3_get_ethtool_stats (struct net_device *dev,
6886                                    struct ethtool_stats *estats, u64 *tmp_stats)
6887 {
6888         struct tg3 *tp = netdev_priv(dev);
6889         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6890 }
6891
6892 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6893 {
6894         struct mii_ioctl_data *data = if_mii(ifr);
6895         struct tg3 *tp = netdev_priv(dev);
6896         int err;
6897
6898         switch(cmd) {
6899         case SIOCGMIIPHY:
6900                 data->phy_id = PHY_ADDR;
6901
6902                 /* fallthru */
6903         case SIOCGMIIREG: {
6904                 u32 mii_regval;
6905
6906                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6907                         break;                  /* We have no PHY */
6908
6909                 spin_lock_irq(&tp->lock);
6910                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6911                 spin_unlock_irq(&tp->lock);
6912
6913                 data->val_out = mii_regval;
6914
6915                 return err;
6916         }
6917
6918         case SIOCSMIIREG:
6919                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6920                         break;                  /* We have no PHY */
6921
6922                 if (!capable(CAP_NET_ADMIN))
6923                         return -EPERM;
6924
6925                 spin_lock_irq(&tp->lock);
6926                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6927                 spin_unlock_irq(&tp->lock);
6928
6929                 return err;
6930
6931         default:
6932                 /* do nothing */
6933                 break;
6934         }
6935         return -EOPNOTSUPP;
6936 }
6937
6938 #if TG3_VLAN_TAG_USED
6939 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6940 {
6941         struct tg3 *tp = netdev_priv(dev);
6942
6943         spin_lock_irq(&tp->lock);
6944         spin_lock(&tp->tx_lock);
6945
6946         tp->vlgrp = grp;
6947
6948         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6949         __tg3_set_rx_mode(dev);
6950
6951         spin_unlock(&tp->tx_lock);
6952         spin_unlock_irq(&tp->lock);
6953 }
6954
6955 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6956 {
6957         struct tg3 *tp = netdev_priv(dev);
6958
6959         spin_lock_irq(&tp->lock);
6960         spin_lock(&tp->tx_lock);
6961         if (tp->vlgrp)
6962                 tp->vlgrp->vlan_devices[vid] = NULL;
6963         spin_unlock(&tp->tx_lock);
6964         spin_unlock_irq(&tp->lock);
6965 }
6966 #endif
6967
6968 static struct ethtool_ops tg3_ethtool_ops = {
6969         .get_settings           = tg3_get_settings,
6970         .set_settings           = tg3_set_settings,
6971         .get_drvinfo            = tg3_get_drvinfo,
6972         .get_regs_len           = tg3_get_regs_len,
6973         .get_regs               = tg3_get_regs,
6974         .get_wol                = tg3_get_wol,
6975         .set_wol                = tg3_set_wol,
6976         .get_msglevel           = tg3_get_msglevel,
6977         .set_msglevel           = tg3_set_msglevel,
6978         .nway_reset             = tg3_nway_reset,
6979         .get_link               = ethtool_op_get_link,
6980         .get_eeprom_len         = tg3_get_eeprom_len,
6981         .get_eeprom             = tg3_get_eeprom,
6982         .set_eeprom             = tg3_set_eeprom,
6983         .get_ringparam          = tg3_get_ringparam,
6984         .set_ringparam          = tg3_set_ringparam,
6985         .get_pauseparam         = tg3_get_pauseparam,
6986         .set_pauseparam         = tg3_set_pauseparam,
6987         .get_rx_csum            = tg3_get_rx_csum,
6988         .set_rx_csum            = tg3_set_rx_csum,
6989         .get_tx_csum            = ethtool_op_get_tx_csum,
6990         .set_tx_csum            = tg3_set_tx_csum,
6991         .get_sg                 = ethtool_op_get_sg,
6992         .set_sg                 = ethtool_op_set_sg,
6993 #if TG3_TSO_SUPPORT != 0
6994         .get_tso                = ethtool_op_get_tso,
6995         .set_tso                = tg3_set_tso,
6996 #endif
6997         .get_strings            = tg3_get_strings,
6998         .get_stats_count        = tg3_get_stats_count,
6999         .get_ethtool_stats      = tg3_get_ethtool_stats,
7000 };
7001
7002 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7003 {
7004         u32 cursize, val;
7005
7006         tp->nvram_size = EEPROM_CHIP_SIZE;
7007
7008         if (tg3_nvram_read(tp, 0, &val) != 0)
7009                 return;
7010
7011         if (swab32(val) != TG3_EEPROM_MAGIC)
7012                 return;
7013
7014         /*
7015          * Size the chip by reading offsets at increasing powers of two.
7016          * When we encounter our validation signature, we know the addressing
7017          * has wrapped around, and thus have our chip size.
7018          */
7019         cursize = 0x800;
7020
7021         while (cursize < tp->nvram_size) {
7022                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7023                         return;
7024
7025                 if (swab32(val) == TG3_EEPROM_MAGIC)
7026                         break;
7027
7028                 cursize <<= 1;
7029         }
7030
7031         tp->nvram_size = cursize;
7032 }
7033                 
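     /* The upper 16 bits of the word at NVRAM offset 0xf0 give the device
      * size in kbytes; fall back to 128kb if the field is zero or the
      * read fails.
      */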
7034 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7035 {
7036         u32 val;
7037
7038         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7039                 if (val != 0) {
7040                         tp->nvram_size = (val >> 16) * 1024;
7041                         return;
7042                 }
7043         }
7044         tp->nvram_size = 0x20000;
7045 }
7046
7047 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7048 {
7049         u32 nvcfg1;
7050
7051         nvcfg1 = tr32(NVRAM_CFG1);
7052         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7053                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7054         }
7055         else {
7056                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7057                 tw32(NVRAM_CFG1, nvcfg1);
7058         }
7059
7060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7061                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7062                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7063                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7064                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7065                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7066                                 break;
7067                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7068                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7069                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7070                                 break;
7071                         case FLASH_VENDOR_ATMEL_EEPROM:
7072                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7073                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7074                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7075                                 break;
7076                         case FLASH_VENDOR_ST:
7077                                 tp->nvram_jedecnum = JEDEC_ST;
7078                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7079                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7080                                 break;
7081                         case FLASH_VENDOR_SAIFUN:
7082                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7083                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7084                                 break;
7085                         case FLASH_VENDOR_SST_SMALL:
7086                         case FLASH_VENDOR_SST_LARGE:
7087                                 tp->nvram_jedecnum = JEDEC_SST;
7088                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7089                                 break;
7090                 }
7091         }
7092         else {
7093                 tp->nvram_jedecnum = JEDEC_ATMEL;
7094                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7095                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7096         }
7097 }
7098
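     /* On the 5752, NVRAM_CFG1 encodes both the flash vendor/type and the
      * page size, so decode them directly instead of using the 5750-era
      * vendor table above.
      */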
7099 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7100 {
7101         u32 nvcfg1;
7102
7103         nvcfg1 = tr32(NVRAM_CFG1);
7104
7105         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7106                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7107                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7108                         tp->nvram_jedecnum = JEDEC_ATMEL;
7109                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7110                         break;
7111                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7112                         tp->nvram_jedecnum = JEDEC_ATMEL;
7113                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7114                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7115                         break;
7116                 case FLASH_5752VENDOR_ST_M45PE10:
7117                 case FLASH_5752VENDOR_ST_M45PE20:
7118                 case FLASH_5752VENDOR_ST_M45PE40:
7119                         tp->nvram_jedecnum = JEDEC_ST;
7120                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7121                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7122                         break;
7123         }
7124
7125         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7126                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7127                         case FLASH_5752PAGE_SIZE_256:
7128                                 tp->nvram_pagesize = 256;
7129                                 break;
7130                         case FLASH_5752PAGE_SIZE_512:
7131                                 tp->nvram_pagesize = 512;
7132                                 break;
7133                         case FLASH_5752PAGE_SIZE_1K:
7134                                 tp->nvram_pagesize = 1024;
7135                                 break;
7136                         case FLASH_5752PAGE_SIZE_2K:
7137                                 tp->nvram_pagesize = 2048;
7138                                 break;
7139                         case FLASH_5752PAGE_SIZE_4K:
7140                                 tp->nvram_pagesize = 4096;
7141                                 break;
7142                         case FLASH_5752PAGE_SIZE_264:
7143                                 tp->nvram_pagesize = 264;
7144                                 break;
7145                 }
7146         }
7147         else {
7148                 /* For eeprom, set pagesize to maximum eeprom size */
7149                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7150
7151                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7152                 tw32(NVRAM_CFG1, nvcfg1);
7153         }
7154 }
7155
7156 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7157 static void __devinit tg3_nvram_init(struct tg3 *tp)
7158 {
7159         int j;
7160
7161         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7162                 return;
7163
7164         tw32_f(GRC_EEPROM_ADDR,
7165              (EEPROM_ADDR_FSM_RESET |
7166               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7167                EEPROM_ADDR_CLKPERD_SHIFT)));
7168
7169         /* XXX schedule_timeout() ... */
7170         for (j = 0; j < 100; j++)
7171                 udelay(10);
7172
7173         /* Enable seeprom accesses. */
7174         tw32_f(GRC_LOCAL_CTRL,
7175              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7176         udelay(100);
7177
7178         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7179             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7180                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7181
7182                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7183                         u32 nvaccess = tr32(NVRAM_ACCESS);
7184
7185                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7186                 }
7187
7188                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7189                         tg3_get_5752_nvram_info(tp);
7190                 else
7191                         tg3_get_nvram_info(tp);
7192
7193                 tg3_get_nvram_size(tp);
7194
7195                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7196                         u32 nvaccess = tr32(NVRAM_ACCESS);
7197
7198                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7199                 }
7200
7201         } else {
7202                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7203
7204                 tg3_get_eeprom_size(tp);
7205         }
7206 }
7207
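     /* Read one 32-bit word through the legacy serial-EEPROM interface:
      * program GRC_EEPROM_ADDR with the offset plus the READ/START bits,
      * poll for EEPROM_ADDR_COMPLETE, then fetch GRC_EEPROM_DATA.
      */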
7208 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7209                                         u32 offset, u32 *val)
7210 {
7211         u32 tmp;
7212         int i;
7213
7214         if (offset > EEPROM_ADDR_ADDR_MASK ||
7215             (offset % 4) != 0)
7216                 return -EINVAL;
7217
7218         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7219                                         EEPROM_ADDR_DEVID_MASK |
7220                                         EEPROM_ADDR_READ);
7221         tw32(GRC_EEPROM_ADDR,
7222              tmp |
7223              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7224              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7225               EEPROM_ADDR_ADDR_MASK) |
7226              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7227
7228         for (i = 0; i < 10000; i++) {
7229                 tmp = tr32(GRC_EEPROM_ADDR);
7230
7231                 if (tmp & EEPROM_ADDR_COMPLETE)
7232                         break;
7233                 udelay(100);
7234         }
7235         if (!(tmp & EEPROM_ADDR_COMPLETE))
7236                 return -EBUSY;
7237
7238         *val = tr32(GRC_EEPROM_DATA);
7239         return 0;
7240 }
7241
7242 #define NVRAM_CMD_TIMEOUT 10000
7243
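     /* Issue a command through NVRAM_CMD and busy-wait, in 10us steps up
      * to NVRAM_CMD_TIMEOUT iterations, for NVRAM_CMD_DONE.
      */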
7244 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7245 {
7246         int i;
7247
7248         tw32(NVRAM_CMD, nvram_cmd);
7249         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7250                 udelay(10);
7251                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7252                         udelay(10);
7253                         break;
7254                 }
7255         }
7256         if (i == NVRAM_CMD_TIMEOUT) {
7257                 return -EBUSY;
7258         }
7259         return 0;
7260 }
7261
7262 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7263 {
7264         int ret;
7265
7266         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7267                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7268                 return -EINVAL;
7269         }
7270
7271         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7272                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7273
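             /* Buffered Atmel flash (AT45DB0x1B) is addressed as a page
              * index plus a byte offset within the page, so convert the
              * linear offset into that form before programming NVRAM_ADDR.
              */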
7274         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7275                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7276                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7277
7278                 offset = ((offset / tp->nvram_pagesize) <<
7279                           ATMEL_AT45DB0X1B_PAGE_POS) +
7280                         (offset % tp->nvram_pagesize);
7281         }
7282
7283         if (offset > NVRAM_ADDR_MSK)
7284                 return -EINVAL;
7285
7286         tg3_nvram_lock(tp);
7287
7288         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7289                 u32 nvaccess = tr32(NVRAM_ACCESS);
7290
7291                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7292         }
7293
7294         tw32(NVRAM_ADDR, offset);
7295         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7296                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7297
7298         if (ret == 0)
7299                 *val = swab32(tr32(NVRAM_RDDATA));
7300
7301         tg3_nvram_unlock(tp);
7302
7303         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7304                 u32 nvaccess = tr32(NVRAM_ACCESS);
7305
7306                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7307         }
7308
7309         return ret;
7310 }
7311
7312 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7313                                     u32 offset, u32 len, u8 *buf)
7314 {
7315         int i, j, rc = 0;
7316         u32 val;
7317
7318         for (i = 0; i < len; i += 4) {
7319                 u32 addr, data;
7320
7321                 addr = offset + i;
7322
7323                 memcpy(&data, buf + i, 4);
7324
7325                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7326
7327                 val = tr32(GRC_EEPROM_ADDR);
7328                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7329
7330                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7331                         EEPROM_ADDR_READ);
7332                 tw32(GRC_EEPROM_ADDR, val |
7333                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7334                         (addr & EEPROM_ADDR_ADDR_MASK) |
7335                         EEPROM_ADDR_START |
7336                         EEPROM_ADDR_WRITE);
7337                 
7338                 for (j = 0; j < 10000; j++) {
7339                         val = tr32(GRC_EEPROM_ADDR);
7340
7341                         if (val & EEPROM_ADDR_COMPLETE)
7342                                 break;
7343                         udelay(100);
7344                 }
7345                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7346                         rc = -EBUSY;
7347                         break;
7348                 }
7349         }
7350
7351         return rc;
7352 }
7353
7354 /* offset and length are dword aligned */
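     /* Unbuffered flash is written a page at a time: read the page back,
      * merge in the caller's data, issue a write-enable, erase the page,
      * then rewrite it one word at a time.
      */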
7355 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7356                 u8 *buf)
7357 {
7358         int ret = 0;
7359         u32 pagesize = tp->nvram_pagesize;
7360         u32 pagemask = pagesize - 1;
7361         u32 nvram_cmd;
7362         u8 *tmp;
7363
7364         tmp = kmalloc(pagesize, GFP_KERNEL);
7365         if (tmp == NULL)
7366                 return -ENOMEM;
7367
7368         while (len) {
7369                 int j;
7370                 u32 phy_addr, page_off, size, nvaccess;
7371
7372                 phy_addr = offset & ~pagemask;
7373         
7374                 for (j = 0; j < pagesize; j += 4) {
7375                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7376                                                 (u32 *) (tmp + j))))
7377                                 break;
7378                 }
7379                 if (ret)
7380                         break;
7381
7382                 page_off = offset & pagemask;
7383                 size = pagesize;
7384                 if (len < size)
7385                         size = len;
7386
7387                 len -= size;
7388
7389                 memcpy(tmp + page_off, buf, size);
7390
7391                 offset = offset + (pagesize - page_off);
7392
7393                 nvaccess = tr32(NVRAM_ACCESS);
7394                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7395
7396                 /*
7397                  * Before we can erase the flash page, we need
7398                  * to issue a special "write enable" command.
7399                  */
7400                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7401
7402                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7403                         break;
7404
7405                 /* Erase the target page */
7406                 tw32(NVRAM_ADDR, phy_addr);
7407
7408                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7409                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7410
7411                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7412                         break;
7413
7414                 /* Issue another write enable to start the write. */
7415                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7416
7417                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7418                         break;
7419
7420                 for (j = 0; j < pagesize; j += 4) {
7421                         u32 data;
7422
7423                         data = *((u32 *) (tmp + j));
7424                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7425
7426                         tw32(NVRAM_ADDR, phy_addr + j);
7427
7428                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7429                                 NVRAM_CMD_WR;
7430
7431                         if (j == 0)
7432                                 nvram_cmd |= NVRAM_CMD_FIRST;
7433                         else if (j == (pagesize - 4))
7434                                 nvram_cmd |= NVRAM_CMD_LAST;
7435
7436                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7437                                 break;
7438                 }
7439                 if (ret)
7440                         break;
7441         }
7442
7443         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7444         tg3_nvram_exec_cmd(tp, nvram_cmd);
7445
7446         kfree(tmp);
7447
7448         return ret;
7449 }
7450
7451 /* offset and length are dword aligned */
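     /* Buffered flash and EEPROM parts accept word writes directly: mark
      * the first and last word of each page (ST parts also need a
      * write-enable at each page start), and force FIRST|LAST on every
      * word when writing to EEPROM.
      */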
7452 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7453                 u8 *buf)
7454 {
7455         int i, ret = 0;
7456
7457         for (i = 0; i < len; i += 4, offset += 4) {
7458                 u32 data, page_off, phy_addr, nvram_cmd;
7459
7460                 memcpy(&data, buf + i, 4);
7461                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7462
7463                 page_off = offset % tp->nvram_pagesize;
7464
7465                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7466                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7467
7468                         phy_addr = ((offset / tp->nvram_pagesize) <<
7469                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7470                 }
7471                 else {
7472                         phy_addr = offset;
7473                 }
7474
7475                 tw32(NVRAM_ADDR, phy_addr);
7476
7477                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7478
7479                 if ((page_off == 0) || (i == 0))
7480                         nvram_cmd |= NVRAM_CMD_FIRST;
7481                 else if (page_off == (tp->nvram_pagesize - 4))
7482                         nvram_cmd |= NVRAM_CMD_LAST;
7483
7484                 if (i == (len - 4))
7485                         nvram_cmd |= NVRAM_CMD_LAST;
7486
7487                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7488                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7489
7490                         if ((ret = tg3_nvram_exec_cmd(tp,
7491                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7492                                 NVRAM_CMD_DONE)))
7493
7494                                 break;
7495                 }
7496                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7497                         /* We always do complete word writes to eeprom. */
7498                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7499                 }
7500
7501                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7502                         break;
7503         }
7504         return ret;
7505 }
7506
7507 /* offset and length are dword aligned */
7508 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7509 {
7510         int ret;
7511
7512         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7513                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7514                 return -EINVAL;
7515         }
7516
7517         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7518                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7519                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7520                 udelay(40);
7521         }
7522
7523         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7524                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7525         }
7526         else {
7527                 u32 grc_mode;
7528
7529                 tg3_nvram_lock(tp);
7530
7531                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7532                         u32 nvaccess = tr32(NVRAM_ACCESS);
7533
7534                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7535
7536                         tw32(NVRAM_WRITE1, 0x406);
7537                 }
7538
7539                 grc_mode = tr32(GRC_MODE);
7540                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7541
7542                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7543                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7544
7545                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7546                                 buf);
7547                 }
7548                 else {
7549                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7550                                 buf);
7551                 }
7552
7553                 grc_mode = tr32(GRC_MODE);
7554                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7555
7556                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7557                         u32 nvaccess = tr32(NVRAM_ACCESS);
7558
7559                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7560                 }
7561                 tg3_nvram_unlock(tp);
7562         }
7563
7564         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7565                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7566                 udelay(40);
7567         }
7568
7569         return ret;
7570 }
7571
7572 struct subsys_tbl_ent {
7573         u16 subsys_vendor, subsys_devid;
7574         u32 phy_id;
7575 };
7576
7577 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7578         /* Broadcom boards. */
7579         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7580         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7581         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7582         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7583         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7584         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7585         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7586         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7587         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7588         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7589         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7590
7591         /* 3com boards. */
7592         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7593         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7594         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7595         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7596         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7597
7598         /* DELL boards. */
7599         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7600         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7601         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7602         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7603
7604         /* Compaq boards. */
7605         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7606         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7607         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7608         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7609         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7610
7611         /* IBM boards. */
7612         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7613 };
7614
7615 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7616 {
7617         int i;
7618
7619         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7620                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7621                      tp->pdev->subsystem_vendor) &&
7622                     (subsys_id_to_phy_id[i].subsys_devid ==
7623                      tp->pdev->subsystem_device))
7624                         return &subsys_id_to_phy_id[i];
7625         }
7626         return NULL;
7627 }
7628
7629 /* Since this function may be called in D3-hot power state during
7630  * tg3_init_one(), only config cycles are allowed.
7631  */
7632 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7633 {
7634         u32 val;
7635
7636         /* Make sure register accesses (indirect or otherwise)
7637          * will function correctly.
7638          */
7639         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7640                                tp->misc_host_ctrl);
7641
7642         tp->phy_id = PHY_ID_INVALID;
7643         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7644
7645         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7646         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7647                 u32 nic_cfg, led_cfg;
7648                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7649                 int eeprom_phy_serdes = 0;
7650
7651                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7652                 tp->nic_sram_data_cfg = nic_cfg;
7653
7654                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7655                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7656                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7657                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7658                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7659                     (ver > 0) && (ver < 0x100))
7660                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7661
7662                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7663                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7664                         eeprom_phy_serdes = 1;
7665
7666                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7667                 if (nic_phy_id != 0) {
7668                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7669                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7670
7671                         eeprom_phy_id  = (id1 >> 16) << 10;
7672                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7673                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7674                 } else
7675                         eeprom_phy_id = 0;
7676
7677                 tp->phy_id = eeprom_phy_id;
7678                 if (eeprom_phy_serdes)
7679                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7680
7681                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7682                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7683                                     SHASTA_EXT_LED_MODE_MASK);
7684                 else
7685                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7686
7687                 switch (led_cfg) {
7688                 default:
7689                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7690                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7691                         break;
7692
7693                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7694                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7695                         break;
7696
7697                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7698                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7699                         break;
7700
7701                 case SHASTA_EXT_LED_SHARED:
7702                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7703                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7704                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7705                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7706                                                  LED_CTRL_MODE_PHY_2);
7707                         break;
7708
7709                 case SHASTA_EXT_LED_MAC:
7710                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7711                         break;
7712
7713                 case SHASTA_EXT_LED_COMBO:
7714                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7715                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7716                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7717                                                  LED_CTRL_MODE_PHY_2);
7718                         break;
7719
7720                 }
7721
7722                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7723                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7724                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7725                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7726
7727                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7728                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7729                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7730                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7731
7732                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7733                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7734                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7735                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7736                 }
7737                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7738                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7739
7740                 if (cfg2 & (1 << 17))
7741                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7742
7743                 /* SerDes signal pre-emphasis in register 0x590 is set
7744                  * by the bootcode if bit 18 is set. */
7745                 if (cfg2 & (1 << 18))
7746                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7747         }
7748 }
7749
7750 static int __devinit tg3_phy_probe(struct tg3 *tp)
7751 {
7752         u32 hw_phy_id_1, hw_phy_id_2;
7753         u32 hw_phy_id, hw_phy_id_masked;
7754         int err;
7755
7756         /* Reading the PHY ID register can conflict with ASF
7757          * firmware access to the PHY hardware.
7758          */
7759         err = 0;
7760         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7761                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7762         } else {
7763                 /* Now read the physical PHY_ID from the chip and verify
7764                  * that it is sane.  If it doesn't look good, we fall back
7765                  * to the PHY_ID found in the eeprom area and, failing
7766                  * that, to the hard-coded subsystem ID table.
7767                  */
7768                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7769                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7770
7771                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7772                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7773                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7774
7775                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7776         }
7777
7778         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7779                 tp->phy_id = hw_phy_id;
7780                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7781                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7782         } else {
7783                 if (tp->phy_id != PHY_ID_INVALID) {
7784                         /* Do nothing, phy ID already set up in
7785                          * tg3_get_eeprom_hw_cfg().
7786                          */
7787                 } else {
7788                         struct subsys_tbl_ent *p;
7789
7790                         /* No eeprom signature?  Try the hardcoded
7791                          * subsys device table.
7792                          */
7793                         p = lookup_by_subsys(tp);
7794                         if (!p)
7795                                 return -ENODEV;
7796
7797                         tp->phy_id = p->phy_id;
7798                         if (!tp->phy_id ||
7799                             tp->phy_id == PHY_ID_BCM8002)
7800                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7801                 }
7802         }
7803
7804         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7805             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7806                 u32 bmsr, adv_reg, tg3_ctrl;
7807
7808                 tg3_readphy(tp, MII_BMSR, &bmsr);
7809                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7810                     (bmsr & BMSR_LSTATUS))
7811                         goto skip_phy_reset;
7812                     
7813                 err = tg3_phy_reset(tp);
7814                 if (err)
7815                         return err;
7816
7817                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7818                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7819                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7820                 tg3_ctrl = 0;
7821                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7822                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7823                                     MII_TG3_CTRL_ADV_1000_FULL);
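                             /* Early 5701 revisions (A0/B0) are forced to
                              * advertise themselves as 1000Base-T master.
                              */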
7824                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7825                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7826                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7827                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7828                 }
7829
7830                 if (!tg3_copper_is_advertising_all(tp)) {
7831                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7832
7833                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7834                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7835
7836                         tg3_writephy(tp, MII_BMCR,
7837                                      BMCR_ANENABLE | BMCR_ANRESTART);
7838                 }
7839                 tg3_phy_set_wirespeed(tp);
7840
7841                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7842                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7843                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7844         }
7845
7846 skip_phy_reset:
7847         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7848                 err = tg3_init_5401phy_dsp(tp);
7849                 if (err)
7850                         return err;
7851         }
7852
7853         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7854                 err = tg3_init_5401phy_dsp(tp);
7855         }
7856
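             /* SERDES parts are driven as fibre-attached gigabit links here,
              * so the default advertising mask is limited to 1000 Mb/s
              * autoneg over FIBRE.
              */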
7857         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7858                 tp->link_config.advertising =
7859                         (ADVERTISED_1000baseT_Half |
7860                          ADVERTISED_1000baseT_Full |
7861                          ADVERTISED_Autoneg |
7862                          ADVERTISED_FIBRE);
7863         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7864                 tp->link_config.advertising &=
7865                         ~(ADVERTISED_1000baseT_Half |
7866                           ADVERTISED_1000baseT_Full);
7867
7868         return err;
7869 }
7870
7871 static void __devinit tg3_read_partno(struct tg3 *tp)
7872 {
7873         unsigned char vpd_data[256];
7874         int i;
7875
7876         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7877                 /* Sun decided not to put the necessary bits in the
7878                  * NVRAM of their onboard tg3 parts :(
7879                  */
7880                 strcpy(tp->board_part_number, "Sun 570X");
7881                 return;
7882         }
7883
7884         for (i = 0; i < 256; i += 4) {
7885                 u32 tmp;
7886
7887                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7888                         goto out_not_found;
7889
7890                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7891                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7892                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7893                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7894         }
7895
7896         /* Now parse and find the part number. */
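             /* VPD is a sequence of "large resource" tags: 0x82 is the
              * Identifier String, 0x90 the read-only (VPD-R) keyword area
              * and 0x91 the read-write (VPD-W) area, each followed by a
              * two-byte little-endian length.  Inside VPD-R, keywords are
              * laid out as 'P' 'N' <len> <data...>, and "PN" carries the
              * board part number we are after.
              */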
7897         for (i = 0; i < 256; ) {
7898                 unsigned char val = vpd_data[i];
7899                 int block_end;
7900
7901                 if (val == 0x82 || val == 0x91) {
7902                         i = (i + 3 +
7903                              (vpd_data[i + 1] +
7904                               (vpd_data[i + 2] << 8)));
7905                         continue;
7906                 }
7907
7908                 if (val != 0x90)
7909                         goto out_not_found;
7910
7911                 block_end = (i + 3 +
7912                              (vpd_data[i + 1] +
7913                               (vpd_data[i + 2] << 8)));
7914                 i += 3;
7915                 while (i < block_end) {
7916                         if (vpd_data[i + 0] == 'P' &&
7917                             vpd_data[i + 1] == 'N') {
7918                                 int partno_len = vpd_data[i + 2];
7919
7920                                 if (partno_len > 24)
7921                                         goto out_not_found;
7922
7923                                 memcpy(tp->board_part_number,
7924                                        &vpd_data[i + 3],
7925                                        partno_len);
7926
7927                                 /* Success. */
7928                                 return;
7929                         }
                             i += 3 + vpd_data[i + 2]; /* skip this keyword: 2-byte key + 1-byte len + data */
7930                 }
7931
7932                 /* Part number not found. */
7933                 goto out_not_found;
7934         }
7935
7936 out_not_found:
7937         strcpy(tp->board_part_number, "none");
7938 }
7939
7940 #ifdef CONFIG_SPARC64
7941 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7942 {
7943         struct pci_dev *pdev = tp->pdev;
7944         struct pcidev_cookie *pcp = pdev->sysdata;
7945
7946         if (pcp != NULL) {
7947                 int node = pcp->prom_node;
7948                 u32 venid;
7949                 int err;
7950
7951                 err = prom_getproperty(node, "subsystem-vendor-id",
7952                                        (char *) &venid, sizeof(venid));
7953                 if (err == 0 || err == -1)
7954                         return 0;
7955                 if (venid == PCI_VENDOR_ID_SUN)
7956                         return 1;
7957         }
7958         return 0;
7959 }
7960 #endif
7961
7962 static int __devinit tg3_get_invariants(struct tg3 *tp)
7963 {
7964         static struct pci_device_id write_reorder_chipsets[] = {
7965                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7966                              PCI_DEVICE_ID_INTEL_82801AA_8) },
7967                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7968                              PCI_DEVICE_ID_INTEL_82801AB_8) },
7969                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7970                              PCI_DEVICE_ID_INTEL_82801BA_11) },
7971                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7972                              PCI_DEVICE_ID_INTEL_82801BA_6) },
7973                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
7974                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
7975                 { },
7976         };
7977         u32 misc_ctrl_reg;
7978         u32 cacheline_sz_reg;
7979         u32 pci_state_reg, grc_misc_cfg;
7980         u32 val;
7981         u16 pci_cmd;
7982         int err;
7983
7984 #ifdef CONFIG_SPARC64
7985         if (tg3_is_sun_570X(tp))
7986                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7987 #endif
7988
7989         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7990          * reordering to the mailbox registers done by the host
7991          * controller can cause major troubles.  We read back from
7992          * every mailbox register write to force the writes to be
7993          * posted to the chip in order.
7994          */
7995         if (pci_dev_present(write_reorder_chipsets))
7996                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
7997
7998         /* Force memory write invalidate off.  If we leave it on,
7999          * then on 5700_BX chips we have to enable a workaround.
8000          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8001          * to match the cacheline size.  The Broadcom driver has this
8002          * workaround but turns MWI off all the time, so it never uses
8003          * it.  This seems to suggest that the workaround is insufficient.
8004          */
8005         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8006         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8007         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8008
8009         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8010          * has the register indirect write enable bit set before
8011          * we try to access any of the MMIO registers.  It is also
8012          * critical that the PCI-X hw workaround situation is decided
8013          * before that as well.
8014          */
8015         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8016                               &misc_ctrl_reg);
8017
8018         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8019                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8020
8021         /* Wrong chip ID in 5752 A0. This code can be removed later
8022          * as A0 is not in production.
8023          */
8024         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8025                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8026
8027         /* Initialize misc host control in PCI block. */
8028         tp->misc_host_ctrl |= (misc_ctrl_reg &
8029                                MISC_HOST_CTRL_CHIPREV);
8030         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8031                                tp->misc_host_ctrl);
8032
8033         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8034                               &cacheline_sz_reg);
8035
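             /* TG3PCI_CACHELINESZ is the standard PCI config dword at 0x0c:
              * cache line size, latency timer, header type and BIST, one
              * byte each from least to most significant.
              */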
8036         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8037         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8038         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8039         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8040
8041         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8042             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8043                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8044
8045         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8046             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8047                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8048
8049         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8050                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8051
8052         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8053                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8054
8055         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8056             tp->pci_lat_timer < 64) {
8057                 tp->pci_lat_timer = 64;
8058
8059                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8060                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8061                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8062                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8063
8064                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8065                                        cacheline_sz_reg);
8066         }
8067
8068         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8069                               &pci_state_reg);
8070
8071         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8072                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8073
8074                 /* If this is a 5700 BX chipset, and we are in PCI-X
8075                  * mode, enable register write workaround.
8076                  *
8077                  * The workaround is to use indirect register accesses
8078                  * for all chip writes not to mailbox registers.
8079                  */
8080                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8081                         u32 pm_reg;
8082                         u16 pci_cmd;
8083
8084                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8085
8086                         /* The chip can have its power management PCI config
8087                          * space registers clobbered due to this bug.
8088                          * So explicitly force the chip into D0 here.
8089                          */
8090                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8091                                               &pm_reg);
8092                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8093                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8094                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8095                                                pm_reg);
8096
8097                         /* Also, force SERR#/PERR# in PCI command. */
8098                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8099                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8100                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8101                 }
8102         }
8103
8104         /* Back to back register writes can cause problems on this chip,
8105          * the workaround is to read back all reg writes except those to
8106          * mailbox regs.  See tg3_write_indirect_reg32().
8107          *
8108          * PCI Express 5750_A0 rev chips need this workaround too.
8109          */
8110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8111             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8112              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8113                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8114
8115         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8116                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8117         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8118                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8119
8120         /* Chip-specific fixup from Broadcom driver */
8121         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8122             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8123                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8124                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8125         }
8126
8127         /* Get eeprom hw config before calling tg3_set_power_state().
8128          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8129          * determined before calling tg3_set_power_state() so that
8130          * we know whether or not to switch out of Vaux power.
8131          * When the flag is set, it means that GPIO1 is used for eeprom
8132          * write protect and also implies that it is a LOM where GPIOs
8133          * are not used to switch power.
8134          */ 
8135         tg3_get_eeprom_hw_cfg(tp);
8136
8137         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8138          * GPIO1 driven high will bring 5700's external PHY out of reset.
8139          * It is also used as eeprom write protect on LOMs.
8140          */
8141         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8142         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8143             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8144                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8145                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8146         /* Unused GPIO3 must be driven as output on 5752 because there
8147          * are no pull-up resistors on unused GPIO pins.
8148          */
8149         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8150                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8151
8152         /* Force the chip into D0. */
8153         err = tg3_set_power_state(tp, 0);
8154         if (err) {
8155                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8156                        pci_name(tp->pdev));
8157                 return err;
8158         }
8159
8160         /* 5700 B0 chips do not support checksumming correctly due
8161          * to hardware bugs.
8162          */
8163         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8164                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8165
8166         /* Pseudo-header checksum is done by hardware logic and not
8167          * the offload processors, so make the chip do the pseudo-
8168          * header checksums on receive.  For transmit it is more
8169          * convenient to do the pseudo-header checksum in software
8170          * as Linux does that on transmit for us in all cases.
8171          */
8172         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8173         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8174
8175         /* Derive initial jumbo mode from MTU assigned in
8176          * ether_setup() via the alloc_etherdev() call
8177          */
8178         if (tp->dev->mtu > ETH_DATA_LEN)
8179                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8180
8181         /* Determine WakeOnLan speed to use. */
8182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8183             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8184             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8185             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8186                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8187         } else {
8188                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8189         }
8190
8191         /* A few boards don't want Ethernet@WireSpeed phy feature */
8192         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8193             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8194              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8195              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8196                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8197
8198         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8199             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8200                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8201         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8202                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8203
8204         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8205                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8206
8207         /* Only 5701 and later support tagged irq status mode.
8208          * Also, 5788 chips cannot use tagged irq status.
8209          *
8210          * However, since we are using NAPI avoid tagged irq status
8211          * because the interrupt condition is more difficult to
8212          * fully clear in that mode.
8213          */
8214         tp->coalesce_mode = 0;
8215
8216         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8217             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8218                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8219
8220         /* Initialize MAC MI mode, polling disabled. */
8221         tw32_f(MAC_MI_MODE, tp->mi_mode);
8222         udelay(80);
8223
8224         /* Initialize data/descriptor byte/word swapping. */
8225         val = tr32(GRC_MODE);
8226         val &= GRC_MODE_HOST_STACKUP;
8227         tw32(GRC_MODE, val | tp->grc_mode);
8228
8229         tg3_switch_clocks(tp);
8230
8231         /* Clear this out for sanity. */
8232         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8233
8234         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8235                               &pci_state_reg);
8236         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8237             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8238                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8239
8240                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8241                     chiprevid == CHIPREV_ID_5701_B0 ||
8242                     chiprevid == CHIPREV_ID_5701_B2 ||
8243                     chiprevid == CHIPREV_ID_5701_B5) {
8244                         void __iomem *sram_base;
8245
8246                         /* Write some dummy words into the SRAM status block
8247                          * area, see if it reads back correctly.  If the return
8248                          * value is bad, force enable the PCIX workaround.
8249                          */
8250                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8251
8252                         writel(0x00000000, sram_base);
8253                         writel(0x00000000, sram_base + 4);
8254                         writel(0xffffffff, sram_base + 4);
8255                         if (readl(sram_base) != 0x00000000)
8256                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8257                 }
8258         }
8259
8260         udelay(50);
8261         tg3_nvram_init(tp);
8262
8263         grc_misc_cfg = tr32(GRC_MISC_CFG);
8264         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8265
8266         /* Broadcom's driver says that CIOBE multisplit has a bug */
8267 #if 0
8268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8269             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8270                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8271                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8272         }
8273 #endif
8274         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8275             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8276              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8277                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8278
8279         /* these are limited to 10/100 only */
8280         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8281              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8282             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8283              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8284              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8285               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8286               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8287             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8288              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8289               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8290                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8291
8292         err = tg3_phy_probe(tp);
8293         if (err) {
8294                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8295                        pci_name(tp->pdev), err);
8296                 /* ... but do not return immediately ... */
8297         }
8298
8299         tg3_read_partno(tp);
8300
8301         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8302                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8303         } else {
8304                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8305                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8306                 else
8307                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8308         }
8309
8310         /* 5700 {AX,BX} chips have a broken status block link
8311          * change bit implementation, so we must use the
8312          * status register in those cases.
8313          */
8314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8315                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8316         else
8317                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8318
8319         /* The led_ctrl is set during tg3_phy_probe, here we might
8320          * have to force the link status polling mechanism based
8321          * upon subsystem IDs.
8322          */
8323         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8324             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8325                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8326                                   TG3_FLAG_USE_LINKCHG_REG);
8327         }
8328
8329         /* For all SERDES we poll the MAC status register. */
8330         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8331                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8332         else
8333                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8334
8335         /* 5700 BX chips need to have their TX producer index mailboxes
8336          * written twice to workaround a bug.
8337          */
8338         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8339                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8340         else
8341                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8342
8343         /* It seems all chips can get confused if TX buffers
8344          * straddle the 4GB address boundary in some cases.
8345          */
8346         tp->dev->hard_start_xmit = tg3_start_xmit;
8347
8348         tp->rx_offset = 2;
8349         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8350             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8351                 tp->rx_offset = 0;
8352
8353         /* By default, disable wake-on-lan.  User can change this
8354          * using ETHTOOL_SWOL.
8355          */
8356         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8357
8358         return err;
8359 }
8360
8361 #ifdef CONFIG_SPARC64
8362 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8363 {
8364         struct net_device *dev = tp->dev;
8365         struct pci_dev *pdev = tp->pdev;
8366         struct pcidev_cookie *pcp = pdev->sysdata;
8367
8368         if (pcp != NULL) {
8369                 int node = pcp->prom_node;
8370
8371                 if (prom_getproplen(node, "local-mac-address") == 6) {
8372                         prom_getproperty(node, "local-mac-address",
8373                                          dev->dev_addr, 6);
8374                         return 0;
8375                 }
8376         }
8377         return -ENODEV;
8378 }
8379
8380 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8381 {
8382         struct net_device *dev = tp->dev;
8383
8384         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8385         return 0;
8386 }
8387 #endif
8388
8389 static int __devinit tg3_get_device_address(struct tg3 *tp)
8390 {
8391         struct net_device *dev = tp->dev;
8392         u32 hi, lo, mac_offset;
8393
8394 #ifdef CONFIG_SPARC64
8395         if (!tg3_get_macaddr_sparc(tp))
8396                 return 0;
8397 #endif
8398
8399         mac_offset = 0x7c;
8400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8401             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8402                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8403                         mac_offset = 0xcc;
8404                 if (tg3_nvram_lock(tp))
8405                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8406                 else
8407                         tg3_nvram_unlock(tp);
8408         }
8409
8410         /* First try to get it from MAC address mailbox. */
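             /* A value of 0x484b ("HK" in ASCII) in the upper half of the
              * high word is treated as the signature of a valid
              * firmware-programmed MAC address in the mailbox.
              */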
8411         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8412         if ((hi >> 16) == 0x484b) {
8413                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8414                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8415
8416                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8417                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8418                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8419                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8420                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8421         }
8422         /* Next, try NVRAM. */
8423         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8424                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8425                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8426                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8427                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8428                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8429                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8430                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8431                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8432         }
8433         /* Finally just fetch it out of the MAC control regs. */
8434         else {
8435                 hi = tr32(MAC_ADDR_0_HIGH);
8436                 lo = tr32(MAC_ADDR_0_LOW);
8437
8438                 dev->dev_addr[5] = lo & 0xff;
8439                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8440                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8441                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8442                 dev->dev_addr[1] = hi & 0xff;
8443                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8444         }
8445
8446         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8447 #ifdef CONFIG_SPARC64
8448                 if (!tg3_get_default_macaddr_sparc(tp))
8449                         return 0;
8450 #endif
8451                 return -EINVAL;
8452         }
8453         return 0;
8454 }
8455
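     /* Push a single internal buffer descriptor through the read (host to
      * device) or write (device to host) DMA engine: the descriptor is
      * written into NIC SRAM through the PCI memory window, queued on the
      * corresponding DMA FIFO, and the completion FIFO is then polled for
      * the descriptor address.
      */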
8456 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8457 {
8458         struct tg3_internal_buffer_desc test_desc;
8459         u32 sram_dma_descs;
8460         int i, ret;
8461
8462         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8463
8464         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8465         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8466         tw32(RDMAC_STATUS, 0);
8467         tw32(WDMAC_STATUS, 0);
8468
8469         tw32(BUFMGR_MODE, 0);
8470         tw32(FTQ_RESET, 0);
8471
8472         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8473         test_desc.addr_lo = buf_dma & 0xffffffff;
8474         test_desc.nic_mbuf = 0x00002100;
8475         test_desc.len = size;
8476
8477         /*
8478          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8479          * the *second* time the tg3 driver was getting loaded after an
8480          * initial scan.
8481          *
8482          * Broadcom tells me:
8483          *   ...the DMA engine is connected to the GRC block and a DMA
8484          *   reset may affect the GRC block in some unpredictable way...
8485          *   The behavior of resets to individual blocks has not been tested.
8486          *
8487          * Broadcom noted the GRC reset will also reset all sub-components.
8488          */
8489         if (to_device) {
8490                 test_desc.cqid_sqid = (13 << 8) | 2;
8491
8492                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8493                 udelay(40);
8494         } else {
8495                 test_desc.cqid_sqid = (16 << 8) | 7;
8496
8497                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8498                 udelay(40);
8499         }
8500         test_desc.flags = 0x00000005;
8501
8502         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8503                 u32 val;
8504
8505                 val = *(((u32 *)&test_desc) + i);
8506                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8507                                        sram_dma_descs + (i * sizeof(u32)));
8508                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8509         }
8510         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8511
8512         if (to_device) {
8513                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8514         } else {
8515                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8516         }
8517
8518         ret = -ENODEV;
8519         for (i = 0; i < 40; i++) {
8520                 u32 val;
8521
8522                 if (to_device)
8523                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8524                 else
8525                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8526                 if ((val & 0xffff) == sram_dma_descs) {
8527                         ret = 0;
8528                         break;
8529                 }
8530
8531                 udelay(100);
8532         }
8533
8534         return ret;
8535 }
8536
8537 #define TEST_BUFFER_SIZE        0x400
8538
8539 static int __devinit tg3_test_dma(struct tg3 *tp)
8540 {
8541         dma_addr_t buf_dma;
8542         u32 *buf;
8543         int ret;
8544
8545         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8546         if (!buf) {
8547                 ret = -ENOMEM;
8548                 goto out_nofree;
8549         }
8550
8551         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8552                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8553
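             /* On non-x86 hosts, match the DMA write boundary to the PCI
              * cache line size reported in config space (a reported value
              * of zero is treated as 1024 bytes).
              */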
8554 #ifndef CONFIG_X86
8555         {
8556                 u8 byte;
8557                 int cacheline_size;
8558                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8559
8560                 if (byte == 0)
8561                         cacheline_size = 1024;
8562                 else
8563                         cacheline_size = (int) byte * 4;
8564
8565                 switch (cacheline_size) {
8566                 case 16:
8567                 case 32:
8568                 case 64:
8569                 case 128:
8570                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8571                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8572                                 tp->dma_rwctrl |=
8573                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8574                                 break;
8575                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8576                                 tp->dma_rwctrl &=
8577                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8578                                 tp->dma_rwctrl |=
8579                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8580                                 break;
8581                         }
8582                         /* fallthrough */
8583                 case 256:
8584                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8585                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8586                                 tp->dma_rwctrl |=
8587                                         DMA_RWCTRL_WRITE_BNDRY_256;
8588                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8589                                 tp->dma_rwctrl |=
8590                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8591                 };
8592         }
8593 #endif
8594
8595         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8596                 /* DMA read watermark not used on PCIE */
8597                 tp->dma_rwctrl |= 0x00180000;
8598         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8600                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8601                         tp->dma_rwctrl |= 0x003f0000;
8602                 else
8603                         tp->dma_rwctrl |= 0x003f000f;
8604         } else {
8605                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8606                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8607                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8608
8609                         if (ccval == 0x6 || ccval == 0x7)
8610                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8611
8612                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
8613                         tp->dma_rwctrl |= 0x009f0000;
8614                 } else {
8615                         tp->dma_rwctrl |= 0x001b000f;
8616                 }
8617         }
8618
8619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8620             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8621                 tp->dma_rwctrl &= 0xfffffff0;
8622
8623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8624             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8625                 /* Remove this if it causes problems for some boards. */
8626                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8627
8628                 /* On 5700/5701 chips, we need to set this bit.
8629                  * Otherwise the chip will issue cacheline transactions
8630                  * to streamable DMA memory with not all the byte
8631                  * enables turned on.  This is an error on several
8632                  * RISC PCI controllers, in particular sparc64.
8633                  *
8634                  * On 5703/5704 chips, this bit has been reassigned
8635                  * a different meaning.  In particular, it is used
8636                  * on those chips to enable a PCI-X workaround.
8637                  */
8638                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8639         }
8640
8641         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8642
8643 #if 0
8644         /* Unneeded, already done by tg3_get_invariants.  */
8645         tg3_switch_clocks(tp);
8646 #endif
8647
8648         ret = 0;
8649         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8650             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8651                 goto out;
8652
8653         while (1) {
8654                 u32 *p = buf, i;
8655
8656                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8657                         p[i] = i;
8658
8659                 /* Send the buffer to the chip. */
8660                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8661                 if (ret) {
8662                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8663                         break;
8664                 }
8665
8666 #if 0
8667                 /* validate data reached card RAM correctly. */
8668                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8669                         u32 val;
8670                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8671                         if (le32_to_cpu(val) != p[i]) {
8672                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8673                                 /* ret = -ENODEV here? */
8674                         }
8675                         p[i] = 0;
8676                 }
8677 #endif
8678                 /* Now read it back. */
8679                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8680                 if (ret) {
8681                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8682
8683                         break;
8684                 }
8685
8686                 /* Verify it. */
8687                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8688                         if (p[i] == i)
8689                                 continue;
8690
8691                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8692                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8693                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8694                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8695                                 break;
8696                         } else {
8697                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8698                                 ret = -ENODEV;
8699                                 goto out;
8700                         }
8701                 }
8702
8703                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8704                         /* Success. */
8705                         ret = 0;
8706                         break;
8707                 }
8708         }
8709
8710 out:
8711         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8712 out_nofree:
8713         return ret;
8714 }
8715
8716 static void __devinit tg3_init_link_config(struct tg3 *tp)
8717 {
8718         tp->link_config.advertising =
8719                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8720                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8721                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8722                  ADVERTISED_Autoneg | ADVERTISED_MII);
8723         tp->link_config.speed = SPEED_INVALID;
8724         tp->link_config.duplex = DUPLEX_INVALID;
8725         tp->link_config.autoneg = AUTONEG_ENABLE;
8726         netif_carrier_off(tp->dev);
8727         tp->link_config.active_speed = SPEED_INVALID;
8728         tp->link_config.active_duplex = DUPLEX_INVALID;
8729         tp->link_config.phy_is_low_power = 0;
8730         tp->link_config.orig_speed = SPEED_INVALID;
8731         tp->link_config.orig_duplex = DUPLEX_INVALID;
8732         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8733 }
8734
8735 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8736 {
8737         tp->bufmgr_config.mbuf_read_dma_low_water =
8738                 DEFAULT_MB_RDMA_LOW_WATER;
8739         tp->bufmgr_config.mbuf_mac_rx_low_water =
8740                 DEFAULT_MB_MACRX_LOW_WATER;
8741         tp->bufmgr_config.mbuf_high_water =
8742                 DEFAULT_MB_HIGH_WATER;
8743
8744         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8745                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8746         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8747                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8748         tp->bufmgr_config.mbuf_high_water_jumbo =
8749                 DEFAULT_MB_HIGH_WATER_JUMBO;
8750
8751         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8752         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8753 }
8754
8755 static char * __devinit tg3_phy_string(struct tg3 *tp)
8756 {
8757         switch (tp->phy_id & PHY_ID_MASK) {
8758         case PHY_ID_BCM5400:    return "5400";
8759         case PHY_ID_BCM5401:    return "5401";
8760         case PHY_ID_BCM5411:    return "5411";
8761         case PHY_ID_BCM5701:    return "5701";
8762         case PHY_ID_BCM5703:    return "5703";
8763         case PHY_ID_BCM5704:    return "5704";
8764         case PHY_ID_BCM5705:    return "5705";
8765         case PHY_ID_BCM5750:    return "5750";
8766         case PHY_ID_BCM5752:    return "5752";
8767         case PHY_ID_BCM8002:    return "8002/serdes";
8768         case 0:                 return "serdes";
8769         default:                return "unknown";
8770         };
8771 }
8772
8773 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8774 {
8775         struct pci_dev *peer;
8776         unsigned int func, devnr = tp->pdev->devfn & ~7;
8777
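             /* The 5704 is a dual-MAC device; the peer is simply the other
              * PCI function in the same slot, i.e. devfn with the function
              * bits masked off.
              */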
8778         for (func = 0; func < 8; func++) {
8779                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8780                 if (peer && peer != tp->pdev)
8781                         break;
8782                 pci_dev_put(peer);
8783         }
8784         if (!peer || peer == tp->pdev)
8785                 BUG();
8786
8787         /*
8788          * We don't need to keep the refcount elevated; there's no way
8789          * to remove one half of this device without removing the other
8790          */
8791         pci_dev_put(peer);
8792
8793         return peer;
8794 }
8795
8796 static int __devinit tg3_init_one(struct pci_dev *pdev,
8797                                   const struct pci_device_id *ent)
8798 {
8799         static int tg3_version_printed = 0;
8800         unsigned long tg3reg_base, tg3reg_len;
8801         struct net_device *dev;
8802         struct tg3 *tp;
8803         int i, err, pci_using_dac, pm_cap;
8804
8805         if (tg3_version_printed++ == 0)
8806                 printk(KERN_INFO "%s", version);
8807
8808         err = pci_enable_device(pdev);
8809         if (err) {
8810                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8811                        "aborting.\n");
8812                 return err;
8813         }
8814
8815         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8816                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8817                        "base address, aborting.\n");
8818                 err = -ENODEV;
8819                 goto err_out_disable_pdev;
8820         }
8821
8822         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8823         if (err) {
8824                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8825                        "aborting.\n");
8826                 goto err_out_disable_pdev;
8827         }
8828
8829         pci_set_master(pdev);
8830
8831         /* Find power-management capability. */
8832         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8833         if (pm_cap == 0) {
8834                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8835                        "aborting.\n");
8836                 err = -EIO;
8837                 goto err_out_free_res;
8838         }
8839
8840         /* Configure DMA attributes. */
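             /* Prefer a full 64-bit DMA mask; if the platform cannot supply
              * one, fall back to 32-bit addressing (pci_using_dac decides
              * whether NETIF_F_HIGHDMA is advertised later on).
              */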
8841         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8842         if (!err) {
8843                 pci_using_dac = 1;
8844                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8845                 if (err < 0) {
8846                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8847                                "for consistent allocations\n");
8848                         goto err_out_free_res;
8849                 }
8850         } else {
8851                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8852                 if (err) {
8853                         printk(KERN_ERR PFX "No usable DMA configuration, "
8854                                "aborting.\n");
8855                         goto err_out_free_res;
8856                 }
8857                 pci_using_dac = 0;
8858         }
8859
8860         tg3reg_base = pci_resource_start(pdev, 0);
8861         tg3reg_len = pci_resource_len(pdev, 0);
8862
8863         dev = alloc_etherdev(sizeof(*tp));
8864         if (!dev) {
8865                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8866                 err = -ENOMEM;
8867                 goto err_out_free_res;
8868         }
8869
8870         SET_MODULE_OWNER(dev);
8871         SET_NETDEV_DEV(dev, &pdev->dev);
8872
8873         if (pci_using_dac)
8874                 dev->features |= NETIF_F_HIGHDMA;
8875         dev->features |= NETIF_F_LLTX;
8876 #if TG3_VLAN_TAG_USED
8877         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8878         dev->vlan_rx_register = tg3_vlan_rx_register;
8879         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8880 #endif
8881
8882         tp = netdev_priv(dev);
8883         tp->pdev = pdev;
8884         tp->dev = dev;
8885         tp->pm_cap = pm_cap;
8886         tp->mac_mode = TG3_DEF_MAC_MODE;
8887         tp->rx_mode = TG3_DEF_RX_MODE;
8888         tp->tx_mode = TG3_DEF_TX_MODE;
8889         tp->mi_mode = MAC_MI_MODE_BASE;
8890         if (tg3_debug > 0)
8891                 tp->msg_enable = tg3_debug;
8892         else
8893                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8894
8895         /* The word/byte swap controls here control register access byte
8896          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8897          * setting below.
8898          */
8899         tp->misc_host_ctrl =
8900                 MISC_HOST_CTRL_MASK_PCI_INT |
8901                 MISC_HOST_CTRL_WORD_SWAP |
8902                 MISC_HOST_CTRL_INDIR_ACCESS |
8903                 MISC_HOST_CTRL_PCISTATE_RW;
8904
8905         /* The NONFRM (non-frame) byte/word swap controls take effect
8906          * on descriptor entries, anything which isn't packet data.
8907          *
8908          * The StrongARM chips on the board (one for tx, one for rx)
8909          * are running in big-endian mode.
8910          */
8911         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8912                         GRC_MODE_WSWAP_NONFRM_DATA);
8913 #ifdef __BIG_ENDIAN
8914         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8915 #endif
8916         spin_lock_init(&tp->lock);
8917         spin_lock_init(&tp->tx_lock);
8918         spin_lock_init(&tp->indirect_lock);
8919         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8920
8921         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8922         if (tp->regs == 0UL) {
8923                 printk(KERN_ERR PFX "Cannot map device registers, "
8924                        "aborting.\n");
8925                 err = -ENOMEM;
8926                 goto err_out_free_dev;
8927         }
8928
8929         tg3_init_link_config(tp);
8930
8931         tg3_init_bufmgr_config(tp);
8932
8933         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8934         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8935         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8936
8937         dev->open = tg3_open;
8938         dev->stop = tg3_close;
8939         dev->get_stats = tg3_get_stats;
8940         dev->set_multicast_list = tg3_set_rx_mode;
8941         dev->set_mac_address = tg3_set_mac_addr;
8942         dev->do_ioctl = tg3_ioctl;
8943         dev->tx_timeout = tg3_tx_timeout;
8944         dev->poll = tg3_poll;
8945         dev->ethtool_ops = &tg3_ethtool_ops;
8946         dev->weight = 64;
8947         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8948         dev->change_mtu = tg3_change_mtu;
8949         dev->irq = pdev->irq;
8950 #ifdef CONFIG_NET_POLL_CONTROLLER
8951         dev->poll_controller = tg3_poll_controller;
8952 #endif
8953
8954         err = tg3_get_invariants(tp);
8955         if (err) {
8956                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8957                        "aborting.\n");
8958                 goto err_out_iounmap;
8959         }
8960
8961         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8962                 tp->bufmgr_config.mbuf_read_dma_low_water =
8963                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8964                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8965                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8966                 tp->bufmgr_config.mbuf_high_water =
8967                         DEFAULT_MB_HIGH_WATER_5705;
8968         }
8969
8970 #if TG3_TSO_SUPPORT != 0
8971         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8972                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8973         }
8974         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8976             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8977             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8978                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8979         } else {
8980                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8981         }
8982
8983         /* TSO is off by default, user can enable using ethtool.  */
8984 #if 0
8985         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8986                 dev->features |= NETIF_F_TSO;
8987 #endif
8988
8989 #endif
8990
8991         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8992             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8993             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8994                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8995                 tp->rx_pending = 63;
8996         }
8997
8998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8999                 tp->pdev_peer = tg3_find_5704_peer(tp);
9000
9001         err = tg3_get_device_address(tp);
9002         if (err) {
9003                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
9004                        "aborting.\n");
9005                 goto err_out_iounmap;
9006         }
9007
9008         /*
9009          * Reset the chip in case an UNDI or EFI driver did not shut it
9010          * down; otherwise the DMA self test will enable WDMAC and we'll
9011          * see (spurious) pending DMA on the PCI bus at that point.
9012          */
9013         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
9014             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9015                 pci_save_state(tp->pdev);
9016                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9017                 tg3_halt(tp);
9018         }
9019
9020         err = tg3_test_dma(tp);
9021         if (err) {
9022                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
9023                 goto err_out_iounmap;
9024         }
9025
9026         /* Tigon3 can do ipv4 only... and some chips have buggy
9027          * checksumming.
9028          */
9029         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
9030                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
9031                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9032         } else
9033                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9034
9035         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
9036                 dev->features &= ~NETIF_F_HIGHDMA;
9037
9038         /* flow control autonegotiation is default behavior */
9039         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9040
9041         err = register_netdev(dev);
9042         if (err) {
9043                 printk(KERN_ERR PFX "Cannot register net device, "
9044                        "aborting.\n");
9045                 goto err_out_iounmap;
9046         }
9047
9048         pci_set_drvdata(pdev, dev);
9049
9050         /* Now that we have fully setup the chip, save away a snapshot
9051          * of the PCI config space.  We need to restore this after
9052          * GRC_MISC_CFG core clock resets and some resume events.
9053          */
9054         pci_save_state(tp->pdev);
9055
9056         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
9057                dev->name,
9058                tp->board_part_number,
9059                tp->pci_chip_rev_id,
9060                tg3_phy_string(tp),
9061                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
9062                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
9063                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
9064                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
9065                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
9066                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
9067
9068         for (i = 0; i < 6; i++)
9069                 printk("%2.2x%c", dev->dev_addr[i],
9070                        i == 5 ? '\n' : ':');
9071
9072         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
9073                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
9074                "TSOcap[%d] \n",
9075                dev->name,
9076                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
9077                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
9078                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
9079                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
9080                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9081                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9082                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9083
9084         return 0;
9085
9086 err_out_iounmap:
9087         iounmap(tp->regs);
9088
9089 err_out_free_dev:
9090         free_netdev(dev);
9091
9092 err_out_free_res:
9093         pci_release_regions(pdev);
9094
9095 err_out_disable_pdev:
9096         pci_disable_device(pdev);
9097         pci_set_drvdata(pdev, NULL);
9098         return err;
9099 }
9100
9101 static void __devexit tg3_remove_one(struct pci_dev *pdev)
9102 {
9103         struct net_device *dev = pci_get_drvdata(pdev);
9104
9105         if (dev) {
9106                 struct tg3 *tp = netdev_priv(dev);
9107
9108                 unregister_netdev(dev);
9109                 iounmap(tp->regs);
9110                 free_netdev(dev);
9111                 pci_release_regions(pdev);
9112                 pci_disable_device(pdev);
9113                 pci_set_drvdata(pdev, NULL);
9114         }
9115 }
9116
9117 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9118 {
9119         struct net_device *dev = pci_get_drvdata(pdev);
9120         struct tg3 *tp = netdev_priv(dev);
9121         int err;
9122
9123         if (!netif_running(dev))
9124                 return 0;
9125
9126         tg3_netif_stop(tp);
9127
9128         del_timer_sync(&tp->timer);
9129
9130         spin_lock_irq(&tp->lock);
9131         spin_lock(&tp->tx_lock);
9132         tg3_disable_ints(tp);
9133         spin_unlock(&tp->tx_lock);
9134         spin_unlock_irq(&tp->lock);
9135
9136         netif_device_detach(dev);
9137
9138         spin_lock_irq(&tp->lock);
9139         spin_lock(&tp->tx_lock);
9140         tg3_halt(tp);
9141         spin_unlock(&tp->tx_lock);
9142         spin_unlock_irq(&tp->lock);
9143
9144         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9145         if (err) {
9146                 spin_lock_irq(&tp->lock);
9147                 spin_lock(&tp->tx_lock);
9148
9149                 tg3_init_hw(tp);
9150
9151                 tp->timer.expires = jiffies + tp->timer_offset;
9152                 add_timer(&tp->timer);
9153
9154                 netif_device_attach(dev);
9155                 tg3_netif_start(tp);
9156
9157                 spin_unlock(&tp->tx_lock);
9158                 spin_unlock_irq(&tp->lock);
9159         }
9160
9161         return err;
9162 }
9163
9164 static int tg3_resume(struct pci_dev *pdev)
9165 {
9166         struct net_device *dev = pci_get_drvdata(pdev);
9167         struct tg3 *tp = netdev_priv(dev);
9168         int err;
9169
9170         if (!netif_running(dev))
9171                 return 0;
9172
9173         pci_restore_state(tp->pdev);
9174
9175         err = tg3_set_power_state(tp, 0);
9176         if (err)
9177                 return err;
9178
9179         netif_device_attach(dev);
9180
9181         spin_lock_irq(&tp->lock);
9182         spin_lock(&tp->tx_lock);
9183
9184         tg3_init_hw(tp);
9185
9186         tp->timer.expires = jiffies + tp->timer_offset;
9187         add_timer(&tp->timer);
9188
9189         tg3_enable_ints(tp);
9190
9191         tg3_netif_start(tp);
9192
9193         spin_unlock(&tp->tx_lock);
9194         spin_unlock_irq(&tp->lock);
9195
9196         return 0;
9197 }
9198
9199 static struct pci_driver tg3_driver = {
9200         .name           = DRV_MODULE_NAME,
9201         .id_table       = tg3_pci_tbl,
9202         .probe          = tg3_init_one,
9203         .remove         = __devexit_p(tg3_remove_one),
9204         .suspend        = tg3_suspend,
9205         .resume         = tg3_resume
9206 };
9207
9208 static int __init tg3_init(void)
9209 {
9210         return pci_module_init(&tg3_driver);
9211 }
9212
9213 static void __exit tg3_cleanup(void)
9214 {
9215         pci_unregister_driver(&tg3_driver);
9216 }
9217
9218 module_init(tg3_init);
9219 module_exit(tg3_cleanup);