[TG3]: Update driver version.
[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.29"
65 #define DRV_MODULE_RELDATE      "May 23, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
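/* Illustrative note (not part of the driver): because TG3_TX_RING_SIZE
 * is a power of two, NEXT_TX() can wrap the index with a single AND.
 * For example, NEXT_TX(511) == (512 & 511) == 0, which is exactly what
 * (511 + 1) % TG3_TX_RING_SIZE would compute, minus the divide.
 */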
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
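/* With the 512-entry TX ring above, this works out to 128 free
 * descriptors before a stopped queue is woken again.
 */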
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { 0, }
234 };
235
236 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
237
238 static struct {
239         const char string[ETH_GSTRING_LEN];
240 } ethtool_stats_keys[TG3_NUM_STATS] = {
241         { "rx_octets" },
242         { "rx_fragments" },
243         { "rx_ucast_packets" },
244         { "rx_mcast_packets" },
245         { "rx_bcast_packets" },
246         { "rx_fcs_errors" },
247         { "rx_align_errors" },
248         { "rx_xon_pause_rcvd" },
249         { "rx_xoff_pause_rcvd" },
250         { "rx_mac_ctrl_rcvd" },
251         { "rx_xoff_entered" },
252         { "rx_frame_too_long_errors" },
253         { "rx_jabbers" },
254         { "rx_undersize_packets" },
255         { "rx_in_length_errors" },
256         { "rx_out_length_errors" },
257         { "rx_64_or_less_octet_packets" },
258         { "rx_65_to_127_octet_packets" },
259         { "rx_128_to_255_octet_packets" },
260         { "rx_256_to_511_octet_packets" },
261         { "rx_512_to_1023_octet_packets" },
262         { "rx_1024_to_1522_octet_packets" },
263         { "rx_1523_to_2047_octet_packets" },
264         { "rx_2048_to_4095_octet_packets" },
265         { "rx_4096_to_8191_octet_packets" },
266         { "rx_8192_to_9022_octet_packets" },
267
268         { "tx_octets" },
269         { "tx_collisions" },
270
271         { "tx_xon_sent" },
272         { "tx_xoff_sent" },
273         { "tx_flow_control" },
274         { "tx_mac_errors" },
275         { "tx_single_collisions" },
276         { "tx_mult_collisions" },
277         { "tx_deferred" },
278         { "tx_excessive_collisions" },
279         { "tx_late_collisions" },
280         { "tx_collide_2times" },
281         { "tx_collide_3times" },
282         { "tx_collide_4times" },
283         { "tx_collide_5times" },
284         { "tx_collide_6times" },
285         { "tx_collide_7times" },
286         { "tx_collide_8times" },
287         { "tx_collide_9times" },
288         { "tx_collide_10times" },
289         { "tx_collide_11times" },
290         { "tx_collide_12times" },
291         { "tx_collide_13times" },
292         { "tx_collide_14times" },
293         { "tx_collide_15times" },
294         { "tx_ucast_packets" },
295         { "tx_mcast_packets" },
296         { "tx_bcast_packets" },
297         { "tx_carrier_sense_errors" },
298         { "tx_discards" },
299         { "tx_errors" },
300
301         { "dma_writeq_full" },
302         { "dma_write_prioq_full" },
303         { "rxbds_empty" },
304         { "rx_discards" },
305         { "rx_errors" },
306         { "rx_threshold_hit" },
307
308         { "dma_readq_full" },
309         { "dma_read_prioq_full" },
310         { "tx_comp_queue_full" },
311
312         { "ring_set_send_prod_index" },
313         { "ring_status_update" },
314         { "nic_irqs" },
315         { "nic_avoided_irqs" },
316         { "nic_tx_threshold_hit" }
317 };
318
319 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
320 {
321         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
322                 unsigned long flags;
323
324                 spin_lock_irqsave(&tp->indirect_lock, flags);
325                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
326                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
327                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
328         } else {
329                 writel(val, tp->regs + off);
330                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
331                         readl(tp->regs + off);
332         }
333 }
334
335 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
336 {
337         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
338                 unsigned long flags;
339
340                 spin_lock_irqsave(&tp->indirect_lock, flags);
341                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
342                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
343                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
344         } else {
345                 void __iomem *dest = tp->regs + off;
346                 writel(val, dest);
347                 readl(dest);    /* always flush PCI write */
348         }
349 }
350
351 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
352 {
353         void __iomem *mbox = tp->regs + off;
354         writel(val, mbox);
355         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
356                 readl(mbox);
357 }
358
359 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
360 {
361         void __iomem *mbox = tp->regs + off;
362         writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
364                 writel(val, mbox);
365         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
366                 readl(mbox);
367 }
368
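/* Register accessor summary for the macros below:
 *
 *   tw32()        - 32-bit write that uses the indirect config-space path
 *                   when the PCI-X target hardware bug workaround is active.
 *   tw32_f()      - like tw32(), but always reads the register back so the
 *                   posted PCI write is flushed before continuing.
 *   tw32_mailbox()- plain 32-bit mailbox write, no workaround applied.
 *   tw32_rx_mbox()/tw32_tx_mbox()
 *                 - mailbox writes with the write-reordering and TXD
 *                   mailbox hardware-bug workarounds applied.
 *   tr32()/tr16()/tr8()
 *                 - plain MMIO reads of the mapped register space.
 */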
369 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
370 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
371 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
372
373 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
374 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
375 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
376 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
377 #define tr32(reg)               readl(tp->regs + (reg))
378 #define tr16(reg)               readw(tp->regs + (reg))
379 #define tr8(reg)                readb(tp->regs + (reg))
380
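/* tg3_write_mem()/tg3_read_mem() access the NIC's on-board SRAM through
 * the PCI configuration-space memory window: the window base register is
 * pointed at the target offset, the data register is written or read, and
 * the base is restored to zero, all under indirect_lock so the window
 * cannot be retargeted mid-access.
 */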
381 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
382 {
383         unsigned long flags;
384
385         spin_lock_irqsave(&tp->indirect_lock, flags);
386         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
387         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
388
389         /* Always leave this as zero. */
390         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
391         spin_unlock_irqrestore(&tp->indirect_lock, flags);
392 }
393
394 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
395 {
396         unsigned long flags;
397
398         spin_lock_irqsave(&tp->indirect_lock, flags);
399         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
400         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
401
402         /* Always leave this as zero. */
403         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
404         spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 }
406
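/* Interrupts are gated in two places: the MISC_HOST_CTRL PCI-interrupt
 * mask bit and interrupt mailbox 0.  Writing a non-zero value to the
 * mailbox keeps the chip from raising further interrupts; writing the
 * last status tag back (shifted into bits 31:24) re-arms it.  The
 * mailbox read-back flushes the posted write.
 */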
407 static void tg3_disable_ints(struct tg3 *tp)
408 {
409         tw32(TG3PCI_MISC_HOST_CTRL,
410              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
411         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
412         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
413 }
414
415 static inline void tg3_cond_int(struct tg3 *tp)
416 {
417         if (tp->hw_status->status & SD_STATUS_UPDATED)
418                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
419 }
420
421 static void tg3_enable_ints(struct tg3 *tp)
422 {
423         tw32(TG3PCI_MISC_HOST_CTRL,
424              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
426                      (tp->last_tag << 24));
427         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
428
429         tg3_cond_int(tp);
430 }
431
432 static inline unsigned int tg3_has_work(struct tg3 *tp)
433 {
434         struct tg3_hw_status *sblk = tp->hw_status;
435         unsigned int work_exists = 0;
436
437         /* check for phy events */
438         if (!(tp->tg3_flags &
439               (TG3_FLAG_USE_LINKCHG_REG |
440                TG3_FLAG_POLL_SERDES))) {
441                 if (sblk->status & SD_STATUS_LINK_CHG)
442                         work_exists = 1;
443         }
444         /* check for RX/TX work to do */
445         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
446             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
447                 work_exists = 1;
448
449         return work_exists;
450 }
451
452 /* tg3_restart_ints
453  *  similar to tg3_enable_ints, but it accurately determines whether there
454  *  is new work pending and can return without flushing the PIO write
455  *  which reenables interrupts.
456  */
457 static void tg3_restart_ints(struct tg3 *tp)
458 {
459         tw32(TG3PCI_MISC_HOST_CTRL,
460                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
461         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
462                      tp->last_tag << 24);
463         mmiowb();
464
465         /* When doing tagged status, this work check is unnecessary.
466          * The last_tag we write above tells the chip which piece of
467          * work we've completed.
468          */
469         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
470             tg3_has_work(tp))
471                 tw32(HOSTCC_MODE, tp->coalesce_mode |
472                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
473 }
474
475 static inline void tg3_netif_stop(struct tg3 *tp)
476 {
477         netif_poll_disable(tp->dev);
478         netif_tx_disable(tp->dev);
479 }
480
481 static inline void tg3_netif_start(struct tg3 *tp)
482 {
483         netif_wake_queue(tp->dev);
484         /* NOTE: unconditional netif_wake_queue is only appropriate
485          * so long as all callers are assured to have free tx slots
486          * (such as after tg3_init_hw)
487          */
488         netif_poll_enable(tp->dev);
489         tg3_cond_int(tp);
490 }
491
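/* Clear the alternate and reduced core-clock selections in
 * TG3PCI_CLOCK_CTRL, keeping only the CLKRUN bits and the low divider
 * field.  Pre-5705 parts that were running on the 44 MHz core clock are
 * stepped through CLOCK_CTRL_ALTCLK first, with a 40 usec settle after
 * each write.
 */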
492 static void tg3_switch_clocks(struct tg3 *tp)
493 {
494         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
495         u32 orig_clock_ctrl;
496
497         orig_clock_ctrl = clock_ctrl;
498         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
499                        CLOCK_CTRL_CLKRUN_OENABLE |
500                        0x1f);
501         tp->pci_clock_ctrl = clock_ctrl;
502
503         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
504                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
505                         tw32_f(TG3PCI_CLOCK_CTRL,
506                                clock_ctrl | CLOCK_CTRL_625_CORE);
507                         udelay(40);
508                 }
509         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
510                 tw32_f(TG3PCI_CLOCK_CTRL,
511                      clock_ctrl |
512                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
513                 udelay(40);
514                 tw32_f(TG3PCI_CLOCK_CTRL,
515                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
516                 udelay(40);
517         }
518         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
519         udelay(40);
520 }
521
522 #define PHY_BUSY_LOOPS  5000
523
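/* MII management (MDIO) access.  tg3_readphy()/tg3_writephy() build a
 * frame in MAC_MI_COM: the PHY address and register number are shifted
 * into their fields, a READ or WRITE command plus MI_COM_START is set,
 * and MI_COM_BUSY is then polled (up to PHY_BUSY_LOOPS times, 10 usec
 * apart) until the MAC has clocked the frame out.  Hardware auto-polling
 * is temporarily disabled around the access and restored afterwards.
 */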
524 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
525 {
526         u32 frame_val;
527         unsigned int loops;
528         int ret;
529
530         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
531                 tw32_f(MAC_MI_MODE,
532                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
533                 udelay(80);
534         }
535
536         *val = 0x0;
537
538         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
539                       MI_COM_PHY_ADDR_MASK);
540         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
541                       MI_COM_REG_ADDR_MASK);
542         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
543
544         tw32_f(MAC_MI_COM, frame_val);
545
546         loops = PHY_BUSY_LOOPS;
547         while (loops != 0) {
548                 udelay(10);
549                 frame_val = tr32(MAC_MI_COM);
550
551                 if ((frame_val & MI_COM_BUSY) == 0) {
552                         udelay(5);
553                         frame_val = tr32(MAC_MI_COM);
554                         break;
555                 }
556                 loops -= 1;
557         }
558
559         ret = -EBUSY;
560         if (loops != 0) {
561                 *val = frame_val & MI_COM_DATA_MASK;
562                 ret = 0;
563         }
564
565         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
566                 tw32_f(MAC_MI_MODE, tp->mi_mode);
567                 udelay(80);
568         }
569
570         return ret;
571 }
572
573 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
574 {
575         u32 frame_val;
576         unsigned int loops;
577         int ret;
578
579         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
580                 tw32_f(MAC_MI_MODE,
581                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
582                 udelay(80);
583         }
584
585         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
586                       MI_COM_PHY_ADDR_MASK);
587         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
588                       MI_COM_REG_ADDR_MASK);
589         frame_val |= (val & MI_COM_DATA_MASK);
590         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
591
592         tw32_f(MAC_MI_COM, frame_val);
593
594         loops = PHY_BUSY_LOOPS;
595         while (loops != 0) {
596                 udelay(10);
597                 frame_val = tr32(MAC_MI_COM);
598                 if ((frame_val & MI_COM_BUSY) == 0) {
599                         udelay(5);
600                         frame_val = tr32(MAC_MI_COM);
601                         break;
602                 }
603                 loops -= 1;
604         }
605
606         ret = -EBUSY;
607         if (loops != 0)
608                 ret = 0;
609
610         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
611                 tw32_f(MAC_MI_MODE, tp->mi_mode);
612                 udelay(80);
613         }
614
615         return ret;
616 }
617
618 static void tg3_phy_set_wirespeed(struct tg3 *tp)
619 {
620         u32 val;
621
622         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
623                 return;
624
625         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
626             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
627                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
628                              (val | (1 << 15) | (1 << 4)));
629 }
630
631 static int tg3_bmcr_reset(struct tg3 *tp)
632 {
633         u32 phy_control;
634         int limit, err;
635
636         /* OK, reset it, and poll the BMCR_RESET bit until it
637          * clears or we time out.
638          */
639         phy_control = BMCR_RESET;
640         err = tg3_writephy(tp, MII_BMCR, phy_control);
641         if (err != 0)
642                 return -EBUSY;
643
644         limit = 5000;
645         while (limit--) {
646                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
647                 if (err != 0)
648                         return -EBUSY;
649
650                 if ((phy_control & BMCR_RESET) == 0) {
651                         udelay(40);
652                         break;
653                 }
654                 udelay(10);
655         }
656         if (limit <= 0)
657                 return -EBUSY;
658
659         return 0;
660 }
661
662 static int tg3_wait_macro_done(struct tg3 *tp)
663 {
664         int limit = 100;
665
666         while (limit--) {
667                 u32 tmp32;
668
669                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
670                         if ((tmp32 & 0x1000) == 0)
671                                 break;
672                 }
673         }
674         if (limit <= 0)
675                 return -EBUSY;
676
677         return 0;
678 }
679
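/* Write a known test pattern into each of the four DSP channels of the
 * PHY, trigger the on-chip macro through register 0x16, then read the
 * pattern back and compare.  If the macro never completes, *resetp is
 * set so the caller resets the PHY before retrying; a miscompare just
 * fails this attempt with -EBUSY.
 */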
680 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
681 {
682         static const u32 test_pat[4][6] = {
683         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
684         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
685         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
686         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
687         };
688         int chan;
689
690         for (chan = 0; chan < 4; chan++) {
691                 int i;
692
693                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
694                              (chan * 0x2000) | 0x0200);
695                 tg3_writephy(tp, 0x16, 0x0002);
696
697                 for (i = 0; i < 6; i++)
698                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
699                                      test_pat[chan][i]);
700
701                 tg3_writephy(tp, 0x16, 0x0202);
702                 if (tg3_wait_macro_done(tp)) {
703                         *resetp = 1;
704                         return -EBUSY;
705                 }
706
707                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
708                              (chan * 0x2000) | 0x0200);
709                 tg3_writephy(tp, 0x16, 0x0082);
710                 if (tg3_wait_macro_done(tp)) {
711                         *resetp = 1;
712                         return -EBUSY;
713                 }
714
715                 tg3_writephy(tp, 0x16, 0x0802);
716                 if (tg3_wait_macro_done(tp)) {
717                         *resetp = 1;
718                         return -EBUSY;
719                 }
720
721                 for (i = 0; i < 6; i += 2) {
722                         u32 low, high;
723
724                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
725                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
726                             tg3_wait_macro_done(tp)) {
727                                 *resetp = 1;
728                                 return -EBUSY;
729                         }
730                         low &= 0x7fff;
731                         high &= 0x000f;
732                         if (low != test_pat[chan][i] ||
733                             high != test_pat[chan][i+1]) {
734                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
735                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
736                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
737
738                                 return -EBUSY;
739                         }
740                 }
741         }
742
743         return 0;
744 }
745
746 static int tg3_phy_reset_chanpat(struct tg3 *tp)
747 {
748         int chan;
749
750         for (chan = 0; chan < 4; chan++) {
751                 int i;
752
753                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
754                              (chan * 0x2000) | 0x0200);
755                 tg3_writephy(tp, 0x16, 0x0002);
756                 for (i = 0; i < 6; i++)
757                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
758                 tg3_writephy(tp, 0x16, 0x0202);
759                 if (tg3_wait_macro_done(tp))
760                         return -EBUSY;
761         }
762
763         return 0;
764 }
765
766 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
767 {
768         u32 reg32, phy9_orig;
769         int retries, do_phy_reset, err;
770
771         retries = 10;
772         do_phy_reset = 1;
773         do {
774                 if (do_phy_reset) {
775                         err = tg3_bmcr_reset(tp);
776                         if (err)
777                                 return err;
778                         do_phy_reset = 0;
779                 }
780
781                 /* Disable transmitter and interrupt.  */
782                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
783                         continue;
784
785                 reg32 |= 0x3000;
786                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
787
788                 /* Set full-duplex, 1000 Mbps.  */
789                 tg3_writephy(tp, MII_BMCR,
790                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
791
792                 /* Set to master mode.  */
793                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
794                         continue;
795
796                 tg3_writephy(tp, MII_TG3_CTRL,
797                              (MII_TG3_CTRL_AS_MASTER |
798                               MII_TG3_CTRL_ENABLE_AS_MASTER));
799
800                 /* Enable SM_DSP_CLOCK and 6dB.  */
801                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
802
803                 /* Block the PHY control access.  */
804                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
805                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
806
807                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
808                 if (!err)
809                         break;
810         } while (--retries);
811
812         err = tg3_phy_reset_chanpat(tp);
813         if (err)
814                 return err;
815
816         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
817         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
818
819         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
820         tg3_writephy(tp, 0x16, 0x0000);
821
822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
823             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
824                 /* Set Extended packet length bit for jumbo frames */
825                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
826         }
827         else {
828                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
829         }
830
831         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
832
833         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
834                 reg32 &= ~0x3000;
835                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
836         } else if (!err)
837                 err = -EBUSY;
838
839         return err;
840 }
841
842 /* This will reset the tigon3 PHY and then apply the various
843  * chip-specific DSP and AUX_CTRL workarounds that must follow a reset.
844  */
845 static int tg3_phy_reset(struct tg3 *tp)
846 {
847         u32 phy_status;
848         int err;
849
850         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
851         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
852         if (err != 0)
853                 return -EBUSY;
854
855         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
856             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
857             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
858                 err = tg3_phy_reset_5703_4_5(tp);
859                 if (err)
860                         return err;
861                 goto out;
862         }
863
864         err = tg3_bmcr_reset(tp);
865         if (err)
866                 return err;
867
868 out:
869         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
870                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
871                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
872                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
873                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
874                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
875                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
876         }
877         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
878                 tg3_writephy(tp, 0x1c, 0x8d68);
879                 tg3_writephy(tp, 0x1c, 0x8d68);
880         }
881         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
882                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
883                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
884                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
885                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
886                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
887                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
888                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
889                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
890         }
891         /* Set Extended packet length bit (bit 14) on all chips
892          * that support jumbo frames. */
893         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
894                 /* Cannot do read-modify-write on 5401 */
895                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
896         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
897                 u32 phy_reg;
898
899                 /* Set bit 14 with read-modify-write to preserve other bits */
900                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
901                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
902                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
903         }
904
905         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
906          * jumbo frame transmission.
907          */
908         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
909                 u32 phy_reg;
910
911                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
912                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
913                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
914         }
915
916         tg3_phy_set_wirespeed(tp);
917         return 0;
918 }
919
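/* Twiddle the GRC local-control GPIOs that manage the board's auxiliary
 * (Vaux) power.  On 5704 both ports share the supply, so the peer
 * device's WOL and init state is taken into account; the GPIO sequence
 * used depends on whether either port still needs auxiliary power for
 * wake-on-LAN.
 */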
920 static void tg3_frob_aux_power(struct tg3 *tp)
921 {
922         struct tg3 *tp_peer = tp;
923
924         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
925                 return;
926
927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
928                 tp_peer = pci_get_drvdata(tp->pdev_peer);
929                 if (!tp_peer)
930                         BUG();
931         }
932
933
934         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
935             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
936                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
937                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
938                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
939                              (GRC_LCLCTRL_GPIO_OE0 |
940                               GRC_LCLCTRL_GPIO_OE1 |
941                               GRC_LCLCTRL_GPIO_OE2 |
942                               GRC_LCLCTRL_GPIO_OUTPUT0 |
943                               GRC_LCLCTRL_GPIO_OUTPUT1));
944                         udelay(100);
945                 } else {
946                         u32 no_gpio2;
947                         u32 grc_local_ctrl;
948
949                         if (tp_peer != tp &&
950                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
951                                 return;
952
953                         /* On 5753 and variants, GPIO2 cannot be used. */
954                         no_gpio2 = tp->nic_sram_data_cfg &
955                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
956
957                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
958                                          GRC_LCLCTRL_GPIO_OE1 |
959                                          GRC_LCLCTRL_GPIO_OE2 |
960                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
961                                          GRC_LCLCTRL_GPIO_OUTPUT2;
962                         if (no_gpio2) {
963                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
964                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
965                         }
966                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
967                                                 grc_local_ctrl);
968                         udelay(100);
969
970                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
971
972                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
973                                                 grc_local_ctrl);
974                         udelay(100);
975
976                         if (!no_gpio2) {
977                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
978                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
979                                        grc_local_ctrl);
980                                 udelay(100);
981                         }
982                 }
983         } else {
984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
985                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
986                         if (tp_peer != tp &&
987                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
988                                 return;
989
990                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
991                              (GRC_LCLCTRL_GPIO_OE1 |
992                               GRC_LCLCTRL_GPIO_OUTPUT1));
993                         udelay(100);
994
995                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
996                              (GRC_LCLCTRL_GPIO_OE1));
997                         udelay(100);
998
999                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1000                              (GRC_LCLCTRL_GPIO_OE1 |
1001                               GRC_LCLCTRL_GPIO_OUTPUT1));
1002                         udelay(100);
1003                 }
1004         }
1005 }
1006
1007 static int tg3_setup_phy(struct tg3 *, int);
1008
1009 #define RESET_KIND_SHUTDOWN     0
1010 #define RESET_KIND_INIT         1
1011 #define RESET_KIND_SUSPEND      2
1012
1013 static void tg3_write_sig_post_reset(struct tg3 *, int);
1014 static int tg3_halt_cpu(struct tg3 *, u32);
1015
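/* Move the device to the requested PCI power state.  D0 just writes the
 * PM control register and switches out of Vaux for non-LOM boards.  For
 * the low-power states the PHY is renegotiated down to a low speed, the
 * MAC is set up for magic-packet wake-up when WOL is enabled, core
 * clocks are slowed or gated, auxiliary power is reconfigured via
 * tg3_frob_aux_power(), and only then is the new D-state written,
 * followed by a shutdown signature for the firmware.
 */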
1016 static int tg3_set_power_state(struct tg3 *tp, int state)
1017 {
1018         u32 misc_host_ctrl;
1019         u16 power_control, power_caps;
1020         int pm = tp->pm_cap;
1021
1022         /* Make sure register accesses (indirect or otherwise)
1023          * will function correctly.
1024          */
1025         pci_write_config_dword(tp->pdev,
1026                                TG3PCI_MISC_HOST_CTRL,
1027                                tp->misc_host_ctrl);
1028
1029         pci_read_config_word(tp->pdev,
1030                              pm + PCI_PM_CTRL,
1031                              &power_control);
1032         power_control |= PCI_PM_CTRL_PME_STATUS;
1033         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1034         switch (state) {
1035         case 0:
1036                 power_control |= 0;
1037                 pci_write_config_word(tp->pdev,
1038                                       pm + PCI_PM_CTRL,
1039                                       power_control);
1040                 udelay(100);    /* Delay after power state change */
1041
1042                 /* Switch out of Vaux if it is not a LOM */
1043                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1044                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1045                         udelay(100);
1046                 }
1047
1048                 return 0;
1049
1050         case 1:
1051                 power_control |= 1;
1052                 break;
1053
1054         case 2:
1055                 power_control |= 2;
1056                 break;
1057
1058         case 3:
1059                 power_control |= 3;
1060                 break;
1061
1062         default:
1063                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1064                        "requested.\n",
1065                        tp->dev->name, state);
1066                 return -EINVAL;
1067         };
1068
1069         power_control |= PCI_PM_CTRL_PME_ENABLE;
1070
1071         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1072         tw32(TG3PCI_MISC_HOST_CTRL,
1073              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1074
1075         if (tp->link_config.phy_is_low_power == 0) {
1076                 tp->link_config.phy_is_low_power = 1;
1077                 tp->link_config.orig_speed = tp->link_config.speed;
1078                 tp->link_config.orig_duplex = tp->link_config.duplex;
1079                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1080         }
1081
1082         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1083                 tp->link_config.speed = SPEED_10;
1084                 tp->link_config.duplex = DUPLEX_HALF;
1085                 tp->link_config.autoneg = AUTONEG_ENABLE;
1086                 tg3_setup_phy(tp, 0);
1087         }
1088
1089         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1090
1091         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1092                 u32 mac_mode;
1093
1094                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1095                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1096                         udelay(40);
1097
1098                         mac_mode = MAC_MODE_PORT_MODE_MII;
1099
1100                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1101                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1102                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1103                 } else {
1104                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1105                 }
1106
1107                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1108                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1109
1110                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1111                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1112                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1113
1114                 tw32_f(MAC_MODE, mac_mode);
1115                 udelay(100);
1116
1117                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1118                 udelay(10);
1119         }
1120
1121         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1122             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1123              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1124                 u32 base_val;
1125
1126                 base_val = tp->pci_clock_ctrl;
1127                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1128                              CLOCK_CTRL_TXCLK_DISABLE);
1129
1130                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1131                      CLOCK_CTRL_ALTCLK |
1132                      CLOCK_CTRL_PWRDOWN_PLL133);
1133                 udelay(40);
1134         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1135                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1136                 u32 newbits1, newbits2;
1137
1138                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1139                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1140                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1141                                     CLOCK_CTRL_TXCLK_DISABLE |
1142                                     CLOCK_CTRL_ALTCLK);
1143                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1144                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1145                         newbits1 = CLOCK_CTRL_625_CORE;
1146                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1147                 } else {
1148                         newbits1 = CLOCK_CTRL_ALTCLK;
1149                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1150                 }
1151
1152                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1153                 udelay(40);
1154
1155                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1156                 udelay(40);
1157
1158                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1159                         u32 newbits3;
1160
1161                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1162                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1163                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1164                                             CLOCK_CTRL_TXCLK_DISABLE |
1165                                             CLOCK_CTRL_44MHZ_CORE);
1166                         } else {
1167                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1168                         }
1169
1170                         tw32_f(TG3PCI_CLOCK_CTRL,
1171                                          tp->pci_clock_ctrl | newbits3);
1172                         udelay(40);
1173                 }
1174         }
1175
1176         tg3_frob_aux_power(tp);
1177
1178         /* Workaround for unstable PLL clock */
1179         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1180             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1181                 u32 val = tr32(0x7d00);
1182
1183                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1184                 tw32(0x7d00, val);
1185                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1186                         tg3_halt_cpu(tp, RX_CPU_BASE);
1187         }
1188
1189         /* Finally, set the new power state. */
1190         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1191         udelay(100);    /* Delay after power state change */
1192
1193         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1194
1195         return 0;
1196 }
1197
1198 static void tg3_link_report(struct tg3 *tp)
1199 {
1200         if (!netif_carrier_ok(tp->dev)) {
1201                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1202         } else {
1203                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1204                        tp->dev->name,
1205                        (tp->link_config.active_speed == SPEED_1000 ?
1206                         1000 :
1207                         (tp->link_config.active_speed == SPEED_100 ?
1208                          100 : 10)),
1209                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1210                         "full" : "half"));
1211
1212                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1213                        "%s for RX.\n",
1214                        tp->dev->name,
1215                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1216                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1217         }
1218 }
1219
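/* Resolve 802.3x flow control from the local and link-partner
 * advertisements (only when TG3_FLAG_PAUSE_AUTONEG is set):
 *
 *   - both sides advertise symmetric pause         -> RX and TX pause
 *   - local asym+sym, partner asym only            -> RX pause only
 *   - local asym only, partner sym+asym            -> TX pause only
 *
 * The resolved flags are then pushed into MAC_RX_MODE/MAC_TX_MODE if
 * they changed.
 */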
1220 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1221 {
1222         u32 new_tg3_flags = 0;
1223         u32 old_rx_mode = tp->rx_mode;
1224         u32 old_tx_mode = tp->tx_mode;
1225
1226         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1227                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1228                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1229                                 if (remote_adv & LPA_PAUSE_CAP)
1230                                         new_tg3_flags |=
1231                                                 (TG3_FLAG_RX_PAUSE |
1232                                                 TG3_FLAG_TX_PAUSE);
1233                                 else if (remote_adv & LPA_PAUSE_ASYM)
1234                                         new_tg3_flags |=
1235                                                 (TG3_FLAG_RX_PAUSE);
1236                         } else {
1237                                 if (remote_adv & LPA_PAUSE_CAP)
1238                                         new_tg3_flags |=
1239                                                 (TG3_FLAG_RX_PAUSE |
1240                                                 TG3_FLAG_TX_PAUSE);
1241                         }
1242                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1243                         if ((remote_adv & LPA_PAUSE_CAP) &&
1244                         (remote_adv & LPA_PAUSE_ASYM))
1245                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1246                 }
1247
1248                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1249                 tp->tg3_flags |= new_tg3_flags;
1250         } else {
1251                 new_tg3_flags = tp->tg3_flags;
1252         }
1253
1254         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1255                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1256         else
1257                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1258
1259         if (old_rx_mode != tp->rx_mode) {
1260                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1261         }
1262
1263         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1264                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1265         else
1266                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1267
1268         if (old_tx_mode != tp->tx_mode) {
1269                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1270         }
1271 }
1272
1273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1274 {
1275         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1276         case MII_TG3_AUX_STAT_10HALF:
1277                 *speed = SPEED_10;
1278                 *duplex = DUPLEX_HALF;
1279                 break;
1280
1281         case MII_TG3_AUX_STAT_10FULL:
1282                 *speed = SPEED_10;
1283                 *duplex = DUPLEX_FULL;
1284                 break;
1285
1286         case MII_TG3_AUX_STAT_100HALF:
1287                 *speed = SPEED_100;
1288                 *duplex = DUPLEX_HALF;
1289                 break;
1290
1291         case MII_TG3_AUX_STAT_100FULL:
1292                 *speed = SPEED_100;
1293                 *duplex = DUPLEX_FULL;
1294                 break;
1295
1296         case MII_TG3_AUX_STAT_1000HALF:
1297                 *speed = SPEED_1000;
1298                 *duplex = DUPLEX_HALF;
1299                 break;
1300
1301         case MII_TG3_AUX_STAT_1000FULL:
1302                 *speed = SPEED_1000;
1303                 *duplex = DUPLEX_FULL;
1304                 break;
1305
1306         default:
1307                 *speed = SPEED_INVALID;
1308                 *duplex = DUPLEX_INVALID;
1309                 break;
1310         };
1311 }
1312
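/* Program the copper PHY's advertisement registers from link_config.
 * In low-power mode only 10 (and optionally 100) Mb/s is advertised;
 * with autoneg and no explicit speed everything the chip supports is
 * advertised (gigabit via MII_TG3_CTRL); with a forced speed the BMCR
 * is written directly, after briefly parking the PHY in loopback until
 * link drops.  Autoneg is (re)started at the end when it is enabled.
 */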
1313 static void tg3_phy_copper_begin(struct tg3 *tp)
1314 {
1315         u32 new_adv;
1316         int i;
1317
1318         if (tp->link_config.phy_is_low_power) {
1319                 /* Entering low power mode.  Disable gigabit and
1320                  * 100baseT advertisements.
1321                  */
1322                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1323
1324                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1325                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1326                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1327                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1328
1329                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1330         } else if (tp->link_config.speed == SPEED_INVALID) {
1331                 tp->link_config.advertising =
1332                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1333                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1334                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1335                          ADVERTISED_Autoneg | ADVERTISED_MII);
1336
1337                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1338                         tp->link_config.advertising &=
1339                                 ~(ADVERTISED_1000baseT_Half |
1340                                   ADVERTISED_1000baseT_Full);
1341
1342                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1343                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1344                         new_adv |= ADVERTISE_10HALF;
1345                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1346                         new_adv |= ADVERTISE_10FULL;
1347                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1348                         new_adv |= ADVERTISE_100HALF;
1349                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1350                         new_adv |= ADVERTISE_100FULL;
1351                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1352
1353                 if (tp->link_config.advertising &
1354                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1355                         new_adv = 0;
1356                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1357                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1358                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1359                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1360                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1361                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1362                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1363                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1364                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1365                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1366                 } else {
1367                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1368                 }
1369         } else {
1370                 /* Asking for a specific link mode. */
1371                 if (tp->link_config.speed == SPEED_1000) {
1372                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1373                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1374
1375                         if (tp->link_config.duplex == DUPLEX_FULL)
1376                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1377                         else
1378                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1379                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1380                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1381                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1382                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1383                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1384                 } else {
1385                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1386
1387                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1388                         if (tp->link_config.speed == SPEED_100) {
1389                                 if (tp->link_config.duplex == DUPLEX_FULL)
1390                                         new_adv |= ADVERTISE_100FULL;
1391                                 else
1392                                         new_adv |= ADVERTISE_100HALF;
1393                         } else {
1394                                 if (tp->link_config.duplex == DUPLEX_FULL)
1395                                         new_adv |= ADVERTISE_10FULL;
1396                                 else
1397                                         new_adv |= ADVERTISE_10HALF;
1398                         }
1399                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1400                 }
1401         }
1402
1403         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1404             tp->link_config.speed != SPEED_INVALID) {
1405                 u32 bmcr, orig_bmcr;
1406
1407                 tp->link_config.active_speed = tp->link_config.speed;
1408                 tp->link_config.active_duplex = tp->link_config.duplex;
1409
1410                 bmcr = 0;
1411                 switch (tp->link_config.speed) {
1412                 default:
1413                 case SPEED_10:
1414                         break;
1415
1416                 case SPEED_100:
1417                         bmcr |= BMCR_SPEED100;
1418                         break;
1419
1420                 case SPEED_1000:
1421                         bmcr |= TG3_BMCR_SPEED1000;
1422                         break;
1423                 }
1424
1425                 if (tp->link_config.duplex == DUPLEX_FULL)
1426                         bmcr |= BMCR_FULLDPLX;
1427
1428                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1429                     (bmcr != orig_bmcr)) {
1430                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1431                         for (i = 0; i < 1500; i++) {
1432                                 u32 tmp;
1433
1434                                 udelay(10);
1435                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1436                                     tg3_readphy(tp, MII_BMSR, &tmp))
1437                                         continue;
1438                                 if (!(tmp & BMSR_LSTATUS)) {
1439                                         udelay(40);
1440                                         break;
1441                                 }
1442                         }
1443                         tg3_writephy(tp, MII_BMCR, bmcr);
1444                         udelay(40);
1445                 }
1446         } else {
1447                 tg3_writephy(tp, MII_BMCR,
1448                              BMCR_ANENABLE | BMCR_ANRESTART);
1449         }
1450 }
1451
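/* The BCM5401 DSP is programmed indirectly: the target DSP register is
 * latched through MII_TG3_DSP_ADDRESS and the data moves through
 * MII_TG3_DSP_RW_PORT.  A helper along these lines (not defined in this
 * version of the driver, shown only as a sketch) captures the idiom used
 * by tg3_init_5401phy_dsp() below:
 *
 *	static int tg3_phydsp_write(struct tg3 *tp, int reg, u32 val)
 *	{
 *		int err;
 *
 *		err  = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 *		err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 *		return err;
 *	}
 */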
1452 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1453 {
1454         int err;
1455
1456         /* Turn off tap power management. */
1457         /* Set Extended packet length bit */
1458         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1459
1460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1462
1463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1465
1466         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1467         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1468
1469         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1470         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1471
1472         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1473         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1474
1475         udelay(40);
1476
1477         return err;
1478 }
1479
1480 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1481 {
1482         u32 adv_reg, all_mask;
1483
1484         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1485                 return 0;
1486
1487         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1488                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1489         if ((adv_reg & all_mask) != all_mask)
1490                 return 0;
1491         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1492                 u32 tg3_ctrl;
1493
1494                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1495                         return 0;
1496
1497                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1498                             MII_TG3_CTRL_ADV_1000_FULL);
1499                 if ((tg3_ctrl & all_mask) != all_mask)
1500                         return 0;
1501         }
1502         return 1;
1503 }
1504
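/* Bring up or re-evaluate the copper link: acknowledge pending MAC status
 * bits, reset the PHY when needed (some PHYs require it after link loss),
 * apply known PHY/chip workarounds, determine the current speed/duplex
 * from the PHY, set up flow control, and finally program MAC_MODE and
 * report any carrier change.
 */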
1505 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1506 {
1507         int current_link_up;
1508         u32 bmsr, dummy;
1509         u16 current_speed;
1510         u8 current_duplex;
1511         int i, err;
1512
1513         tw32(MAC_EVENT, 0);
1514
1515         tw32_f(MAC_STATUS,
1516              (MAC_STATUS_SYNC_CHANGED |
1517               MAC_STATUS_CFG_CHANGED |
1518               MAC_STATUS_MI_COMPLETION |
1519               MAC_STATUS_LNKSTATE_CHANGED));
1520         udelay(40);
1521
1522         tp->mi_mode = MAC_MI_MODE_BASE;
1523         tw32_f(MAC_MI_MODE, tp->mi_mode);
1524         udelay(80);
1525
1526         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1527
1528         /* Some third-party PHYs need to be reset on link going
1529          * down.
1530          */
1531         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1532              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1533              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1534             netif_carrier_ok(tp->dev)) {
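                /* The BMSR latches link-down events, so the first read may
                 * still report the stale state; reading it twice (as done
                 * throughout this driver) discards the latched value and acts
                 * on the current one.  A hypothetical helper for the idiom:
                 *
                 *	static int tg3_readphy_twice(struct tg3 *tp, int reg, u32 *val)
                 *	{
                 *		tg3_readphy(tp, reg, val);
                 *		return tg3_readphy(tp, reg, val);
                 *	}
                 */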
1535                 tg3_readphy(tp, MII_BMSR, &bmsr);
1536                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1537                     !(bmsr & BMSR_LSTATUS))
1538                         force_reset = 1;
1539         }
1540         if (force_reset)
1541                 tg3_phy_reset(tp);
1542
1543         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1544                 tg3_readphy(tp, MII_BMSR, &bmsr);
1545                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1546                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1547                         bmsr = 0;
1548
1549                 if (!(bmsr & BMSR_LSTATUS)) {
1550                         err = tg3_init_5401phy_dsp(tp);
1551                         if (err)
1552                                 return err;
1553
1554                         tg3_readphy(tp, MII_BMSR, &bmsr);
1555                         for (i = 0; i < 1000; i++) {
1556                                 udelay(10);
1557                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1558                                     (bmsr & BMSR_LSTATUS)) {
1559                                         udelay(40);
1560                                         break;
1561                                 }
1562                         }
1563
1564                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1565                             !(bmsr & BMSR_LSTATUS) &&
1566                             tp->link_config.active_speed == SPEED_1000) {
1567                                 err = tg3_phy_reset(tp);
1568                                 if (!err)
1569                                         err = tg3_init_5401phy_dsp(tp);
1570                                 if (err)
1571                                         return err;
1572                         }
1573                 }
1574         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1575                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1576                 /* 5701 {A0,B0} CRC bug workaround */
1577                 tg3_writephy(tp, 0x15, 0x0a75);
1578                 tg3_writephy(tp, 0x1c, 0x8c68);
1579                 tg3_writephy(tp, 0x1c, 0x8d68);
1580                 tg3_writephy(tp, 0x1c, 0x8c68);
1581         }
1582
1583         /* Clear pending interrupts... */
1584         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1585         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1586
1587         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1588                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1589         else
1590                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1591
1592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1594                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1595                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1596                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1597                 else
1598                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1599         }
1600
1601         current_link_up = 0;
1602         current_speed = SPEED_INVALID;
1603         current_duplex = DUPLEX_INVALID;
1604
1605         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1606                 u32 val;
1607
1608                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1609                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1610                 if (!(val & (1 << 10))) {
1611                         val |= (1 << 10);
1612                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1613                         goto relink;
1614                 }
1615         }
1616
1617         bmsr = 0;
1618         for (i = 0; i < 100; i++) {
1619                 tg3_readphy(tp, MII_BMSR, &bmsr);
1620                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1621                     (bmsr & BMSR_LSTATUS))
1622                         break;
1623                 udelay(40);
1624         }
1625
1626         if (bmsr & BMSR_LSTATUS) {
1627                 u32 aux_stat, bmcr;
1628
1629                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1630                 for (i = 0; i < 2000; i++) {
1631                         udelay(10);
1632                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1633                             aux_stat)
1634                                 break;
1635                 }
1636
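                /* MII_TG3_AUX_STAT encodes the speed/duplex the PHY resolved
                 * to; decode it into the values used for active_speed and
                 * active_duplex below.
                 */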
1637                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1638                                              &current_speed,
1639                                              &current_duplex);
1640
1641                 bmcr = 0;
1642                 for (i = 0; i < 200; i++) {
1643                         tg3_readphy(tp, MII_BMCR, &bmcr);
1644                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1645                                 continue;
1646                         if (bmcr && bmcr != 0x7fff)
1647                                 break;
1648                         udelay(10);
1649                 }
1650
1651                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1652                         if (bmcr & BMCR_ANENABLE) {
1653                                 current_link_up = 1;
1654
1655                                 /* Force autoneg restart if we are exiting
1656                                  * low power mode.
1657                                  */
1658                                 if (!tg3_copper_is_advertising_all(tp))
1659                                         current_link_up = 0;
1660                         } else {
1661                                 current_link_up = 0;
1662                         }
1663                 } else {
1664                         if (!(bmcr & BMCR_ANENABLE) &&
1665                             tp->link_config.speed == current_speed &&
1666                             tp->link_config.duplex == current_duplex) {
1667                                 current_link_up = 1;
1668                         } else {
1669                                 current_link_up = 0;
1670                         }
1671                 }
1672
1673                 tp->link_config.active_speed = current_speed;
1674                 tp->link_config.active_duplex = current_duplex;
1675         }
1676
1677         if (current_link_up == 1 &&
1678             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1679             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1680                 u32 local_adv, remote_adv;
1681
1682                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1683                         local_adv = 0;
1684                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1685
1686                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1687                         remote_adv = 0;
1688
1689                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1690
1691                 /* If we are not advertising full pause capability,
1692                  * something is wrong.  Bring the link down and reconfigure.
1693                  */
1694                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1695                         current_link_up = 0;
1696                 } else {
1697                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1698                 }
1699         }
1700 relink:
1701         if (current_link_up == 0) {
1702                 u32 tmp;
1703
1704                 tg3_phy_copper_begin(tp);
1705
1706                 tg3_readphy(tp, MII_BMSR, &tmp);
1707                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1708                     (tmp & BMSR_LSTATUS))
1709                         current_link_up = 1;
1710         }
1711
1712         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1713         if (current_link_up == 1) {
1714                 if (tp->link_config.active_speed == SPEED_100 ||
1715                     tp->link_config.active_speed == SPEED_10)
1716                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1717                 else
1718                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1719         } else
1720                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1721
1722         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1723         if (tp->link_config.active_duplex == DUPLEX_HALF)
1724                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1725
1726         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1728                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1729                     (current_link_up == 1 &&
1730                      tp->link_config.active_speed == SPEED_10))
1731                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1732         } else {
1733                 if (current_link_up == 1)
1734                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1735         }
1736
1737         /* ??? Without this setting Netgear GA302T PHY does not
1738          * ??? send/receive packets...
1739          */
1740         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1741             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1742                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1743                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1744                 udelay(80);
1745         }
1746
1747         tw32_f(MAC_MODE, tp->mac_mode);
1748         udelay(40);
1749
1750         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1751                 /* Polled via timer. */
1752                 tw32_f(MAC_EVENT, 0);
1753         } else {
1754                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1755         }
1756         udelay(40);
1757
1758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1759             current_link_up == 1 &&
1760             tp->link_config.active_speed == SPEED_1000 &&
1761             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1762              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1763                 udelay(120);
1764                 tw32_f(MAC_STATUS,
1765                      (MAC_STATUS_SYNC_CHANGED |
1766                       MAC_STATUS_CFG_CHANGED));
1767                 udelay(40);
1768                 tg3_write_mem(tp,
1769                               NIC_SRAM_FIRMWARE_MBOX,
1770                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1771         }
1772
1773         if (current_link_up != netif_carrier_ok(tp->dev)) {
1774                 if (current_link_up)
1775                         netif_carrier_on(tp->dev);
1776                 else
1777                         netif_carrier_off(tp->dev);
1778                 tg3_link_report(tp);
1779         }
1780
1781         return 0;
1782 }
1783
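/* Software 1000BASE-X autonegotiation.  The state values and MR_* flags
 * below roughly mirror the arbitration state diagram and management
 * variables of IEEE 802.3 clause 37; tg3_fiber_aneg_smachine() advances
 * the state machine one tick at a time and fiber_autoneg() drives it.
 */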
1784 struct tg3_fiber_aneginfo {
1785         int state;
1786 #define ANEG_STATE_UNKNOWN              0
1787 #define ANEG_STATE_AN_ENABLE            1
1788 #define ANEG_STATE_RESTART_INIT         2
1789 #define ANEG_STATE_RESTART              3
1790 #define ANEG_STATE_DISABLE_LINK_OK      4
1791 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1792 #define ANEG_STATE_ABILITY_DETECT       6
1793 #define ANEG_STATE_ACK_DETECT_INIT      7
1794 #define ANEG_STATE_ACK_DETECT           8
1795 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1796 #define ANEG_STATE_COMPLETE_ACK         10
1797 #define ANEG_STATE_IDLE_DETECT_INIT     11
1798 #define ANEG_STATE_IDLE_DETECT          12
1799 #define ANEG_STATE_LINK_OK              13
1800 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1801 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1802
1803         u32 flags;
1804 #define MR_AN_ENABLE            0x00000001
1805 #define MR_RESTART_AN           0x00000002
1806 #define MR_AN_COMPLETE          0x00000004
1807 #define MR_PAGE_RX              0x00000008
1808 #define MR_NP_LOADED            0x00000010
1809 #define MR_TOGGLE_TX            0x00000020
1810 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1811 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1812 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1813 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1814 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1815 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1816 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1817 #define MR_TOGGLE_RX            0x00002000
1818 #define MR_NP_RX                0x00004000
1819
1820 #define MR_LINK_OK              0x80000000
1821
1822         unsigned long link_time, cur_time;
1823
1824         u32 ability_match_cfg;
1825         int ability_match_count;
1826
1827         char ability_match, idle_match, ack_match;
1828
1829         u32 txconfig, rxconfig;
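/* Fields of the 16-bit configuration (/C/) code word exchanged during
 * 1000BASE-X autoneg, as the MAC presents it in MAC_TX_AUTO_NEG and
 * MAC_RX_AUTO_NEG.  Note the bit positions appear byte-swapped relative
 * to the IEEE 802.3 clause 37 numbering (e.g. full-duplex shows up here
 * as 0x2000).
 */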
1830 #define ANEG_CFG_NP             0x00000080
1831 #define ANEG_CFG_ACK            0x00000040
1832 #define ANEG_CFG_RF2            0x00000020
1833 #define ANEG_CFG_RF1            0x00000010
1834 #define ANEG_CFG_PS2            0x00000001
1835 #define ANEG_CFG_PS1            0x00008000
1836 #define ANEG_CFG_HD             0x00004000
1837 #define ANEG_CFG_FD             0x00002000
1838 #define ANEG_CFG_INVAL          0x00001f06
1839
1840 };
1841 #define ANEG_OK         0
1842 #define ANEG_DONE       1
1843 #define ANEG_TIMER_ENAB 2
1844 #define ANEG_FAILED     -1
1845
1846 #define ANEG_STATE_SETTLE_TIME  10000
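/* One state machine "tick" corresponds to one pass through the polling
 * loop in fiber_autoneg() below (roughly 1 usec per pass), so the settle
 * time above works out to about 10 ms.
 */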
1847
1848 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1849                                    struct tg3_fiber_aneginfo *ap)
1850 {
1851         unsigned long delta;
1852         u32 rx_cfg_reg;
1853         int ret;
1854
1855         if (ap->state == ANEG_STATE_UNKNOWN) {
1856                 ap->rxconfig = 0;
1857                 ap->link_time = 0;
1858                 ap->cur_time = 0;
1859                 ap->ability_match_cfg = 0;
1860                 ap->ability_match_count = 0;
1861                 ap->ability_match = 0;
1862                 ap->idle_match = 0;
1863                 ap->ack_match = 0;
1864         }
1865         ap->cur_time++;
1866
1867         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1868                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1869
1870                 if (rx_cfg_reg != ap->ability_match_cfg) {
1871                         ap->ability_match_cfg = rx_cfg_reg;
1872                         ap->ability_match = 0;
1873                         ap->ability_match_count = 0;
1874                 } else {
1875                         if (++ap->ability_match_count > 1) {
1876                                 ap->ability_match = 1;
1877                                 ap->ability_match_cfg = rx_cfg_reg;
1878                         }
1879                 }
1880                 if (rx_cfg_reg & ANEG_CFG_ACK)
1881                         ap->ack_match = 1;
1882                 else
1883                         ap->ack_match = 0;
1884
1885                 ap->idle_match = 0;
1886         } else {
1887                 ap->idle_match = 1;
1888                 ap->ability_match_cfg = 0;
1889                 ap->ability_match_count = 0;
1890                 ap->ability_match = 0;
1891                 ap->ack_match = 0;
1892
1893                 rx_cfg_reg = 0;
1894         }
1895
1896         ap->rxconfig = rx_cfg_reg;
1897         ret = ANEG_OK;
1898
1899         switch(ap->state) {
1900         case ANEG_STATE_UNKNOWN:
1901                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1902                         ap->state = ANEG_STATE_AN_ENABLE;
1903
1904                 /* fallthru */
1905         case ANEG_STATE_AN_ENABLE:
1906                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1907                 if (ap->flags & MR_AN_ENABLE) {
1908                         ap->link_time = 0;
1909                         ap->cur_time = 0;
1910                         ap->ability_match_cfg = 0;
1911                         ap->ability_match_count = 0;
1912                         ap->ability_match = 0;
1913                         ap->idle_match = 0;
1914                         ap->ack_match = 0;
1915
1916                         ap->state = ANEG_STATE_RESTART_INIT;
1917                 } else {
1918                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1919                 }
1920                 break;
1921
1922         case ANEG_STATE_RESTART_INIT:
1923                 ap->link_time = ap->cur_time;
1924                 ap->flags &= ~(MR_NP_LOADED);
1925                 ap->txconfig = 0;
1926                 tw32(MAC_TX_AUTO_NEG, 0);
1927                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1928                 tw32_f(MAC_MODE, tp->mac_mode);
1929                 udelay(40);
1930
1931                 ret = ANEG_TIMER_ENAB;
1932                 ap->state = ANEG_STATE_RESTART;
1933
1934                 /* fallthru */
1935         case ANEG_STATE_RESTART:
1936                 delta = ap->cur_time - ap->link_time;
1937                 if (delta > ANEG_STATE_SETTLE_TIME) {
1938                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1939                 } else {
1940                         ret = ANEG_TIMER_ENAB;
1941                 }
1942                 break;
1943
1944         case ANEG_STATE_DISABLE_LINK_OK:
1945                 ret = ANEG_DONE;
1946                 break;
1947
1948         case ANEG_STATE_ABILITY_DETECT_INIT:
1949                 ap->flags &= ~(MR_TOGGLE_TX);
1950                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1951                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1952                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1953                 tw32_f(MAC_MODE, tp->mac_mode);
1954                 udelay(40);
1955
1956                 ap->state = ANEG_STATE_ABILITY_DETECT;
1957                 break;
1958
1959         case ANEG_STATE_ABILITY_DETECT:
1960                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1961                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1962                 }
1963                 break;
1964
1965         case ANEG_STATE_ACK_DETECT_INIT:
1966                 ap->txconfig |= ANEG_CFG_ACK;
1967                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1968                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1969                 tw32_f(MAC_MODE, tp->mac_mode);
1970                 udelay(40);
1971
1972                 ap->state = ANEG_STATE_ACK_DETECT;
1973
1974                 /* fallthru */
1975         case ANEG_STATE_ACK_DETECT:
1976                 if (ap->ack_match != 0) {
1977                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1978                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1979                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1980                         } else {
1981                                 ap->state = ANEG_STATE_AN_ENABLE;
1982                         }
1983                 } else if (ap->ability_match != 0 &&
1984                            ap->rxconfig == 0) {
1985                         ap->state = ANEG_STATE_AN_ENABLE;
1986                 }
1987                 break;
1988
1989         case ANEG_STATE_COMPLETE_ACK_INIT:
1990                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1991                         ret = ANEG_FAILED;
1992                         break;
1993                 }
1994                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1995                                MR_LP_ADV_HALF_DUPLEX |
1996                                MR_LP_ADV_SYM_PAUSE |
1997                                MR_LP_ADV_ASYM_PAUSE |
1998                                MR_LP_ADV_REMOTE_FAULT1 |
1999                                MR_LP_ADV_REMOTE_FAULT2 |
2000                                MR_LP_ADV_NEXT_PAGE |
2001                                MR_TOGGLE_RX |
2002                                MR_NP_RX);
2003                 if (ap->rxconfig & ANEG_CFG_FD)
2004                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2005                 if (ap->rxconfig & ANEG_CFG_HD)
2006                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2007                 if (ap->rxconfig & ANEG_CFG_PS1)
2008                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2009                 if (ap->rxconfig & ANEG_CFG_PS2)
2010                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2011                 if (ap->rxconfig & ANEG_CFG_RF1)
2012                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2013                 if (ap->rxconfig & ANEG_CFG_RF2)
2014                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2015                 if (ap->rxconfig & ANEG_CFG_NP)
2016                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2017
2018                 ap->link_time = ap->cur_time;
2019
2020                 ap->flags ^= (MR_TOGGLE_TX);
2021                 if (ap->rxconfig & 0x0008)
2022                         ap->flags |= MR_TOGGLE_RX;
2023                 if (ap->rxconfig & ANEG_CFG_NP)
2024                         ap->flags |= MR_NP_RX;
2025                 ap->flags |= MR_PAGE_RX;
2026
2027                 ap->state = ANEG_STATE_COMPLETE_ACK;
2028                 ret = ANEG_TIMER_ENAB;
2029                 break;
2030
2031         case ANEG_STATE_COMPLETE_ACK:
2032                 if (ap->ability_match != 0 &&
2033                     ap->rxconfig == 0) {
2034                         ap->state = ANEG_STATE_AN_ENABLE;
2035                         break;
2036                 }
2037                 delta = ap->cur_time - ap->link_time;
2038                 if (delta > ANEG_STATE_SETTLE_TIME) {
2039                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2040                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2041                         } else {
2042                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2043                                     !(ap->flags & MR_NP_RX)) {
2044                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2045                                 } else {
2046                                         ret = ANEG_FAILED;
2047                                 }
2048                         }
2049                 }
2050                 break;
2051
2052         case ANEG_STATE_IDLE_DETECT_INIT:
2053                 ap->link_time = ap->cur_time;
2054                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2055                 tw32_f(MAC_MODE, tp->mac_mode);
2056                 udelay(40);
2057
2058                 ap->state = ANEG_STATE_IDLE_DETECT;
2059                 ret = ANEG_TIMER_ENAB;
2060                 break;
2061
2062         case ANEG_STATE_IDLE_DETECT:
2063                 if (ap->ability_match != 0 &&
2064                     ap->rxconfig == 0) {
2065                         ap->state = ANEG_STATE_AN_ENABLE;
2066                         break;
2067                 }
2068                 delta = ap->cur_time - ap->link_time;
2069                 if (delta > ANEG_STATE_SETTLE_TIME) {
2070                         /* XXX another gem from the Broadcom driver :( */
2071                         ap->state = ANEG_STATE_LINK_OK;
2072                 }
2073                 break;
2074
2075         case ANEG_STATE_LINK_OK:
2076                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2077                 ret = ANEG_DONE;
2078                 break;
2079
2080         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2081                 /* ??? unimplemented */
2082                 break;
2083
2084         case ANEG_STATE_NEXT_PAGE_WAIT:
2085                 /* ??? unimplemented */
2086                 break;
2087
2088         default:
2089                 ret = ANEG_FAILED;
2090                 break;
2091         }
2092
2093         return ret;
2094 }
2095
2096 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2097 {
2098         int res = 0;
2099         struct tg3_fiber_aneginfo aninfo;
2100         int status = ANEG_FAILED;
2101         unsigned int tick;
2102         u32 tmp;
2103
2104         tw32_f(MAC_TX_AUTO_NEG, 0);
2105
2106         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2107         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2108         udelay(40);
2109
2110         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2111         udelay(40);
2112
2113         memset(&aninfo, 0, sizeof(aninfo));
2114         aninfo.flags |= MR_AN_ENABLE;
2115         aninfo.state = ANEG_STATE_UNKNOWN;
2116         aninfo.cur_time = 0;
2117         tick = 0;
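        /* Crank the autoneg state machine for up to 195000 iterations,
         * i.e. roughly 195 ms at 1 usec per pass, unless it completes or
         * fails earlier.
         */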
2118         while (++tick < 195000) {
2119                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2120                 if (status == ANEG_DONE || status == ANEG_FAILED)
2121                         break;
2122
2123                 udelay(1);
2124         }
2125
2126         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2127         tw32_f(MAC_MODE, tp->mac_mode);
2128         udelay(40);
2129
2130         *flags = aninfo.flags;
2131
2132         if (status == ANEG_DONE &&
2133             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2134                              MR_LP_ADV_FULL_DUPLEX)))
2135                 res = 1;
2136
2137         return res;
2138 }
2139
2140 static void tg3_init_bcm8002(struct tg3 *tp)
2141 {
2142         u32 mac_status = tr32(MAC_STATUS);
2143         int i;
2144
2145         /* Reset when initializing for the first time, or when we already have a link. */
2146         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2147             !(mac_status & MAC_STATUS_PCS_SYNCED))
2148                 return;
2149
2150         /* Set PLL lock range. */
2151         tg3_writephy(tp, 0x16, 0x8007);
2152
2153         /* SW reset */
2154         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2155
2156         /* Wait for reset to complete. */
2157         /* XXX schedule_timeout() ... */
2158         for (i = 0; i < 500; i++)
2159                 udelay(10);
2160
2161         /* Config mode; select PMA/Ch 1 regs. */
2162         tg3_writephy(tp, 0x10, 0x8411);
2163
2164         /* Enable auto-lock and comdet, select txclk for tx. */
2165         tg3_writephy(tp, 0x11, 0x0a10);
2166
2167         tg3_writephy(tp, 0x18, 0x00a0);
2168         tg3_writephy(tp, 0x16, 0x41ff);
2169
2170         /* Assert and deassert POR. */
2171         tg3_writephy(tp, 0x13, 0x0400);
2172         udelay(40);
2173         tg3_writephy(tp, 0x13, 0x0000);
2174
2175         tg3_writephy(tp, 0x11, 0x0a50);
2176         udelay(40);
2177         tg3_writephy(tp, 0x11, 0x0a10);
2178
2179         /* Wait for signal to stabilize */
2180         /* XXX schedule_timeout() ... */
2181         for (i = 0; i < 15000; i++)
2182                 udelay(10);
2183
2184         /* Deselect the channel register so we can read the PHYID
2185          * later.
2186          */
2187         tg3_writephy(tp, 0x10, 0x8011);
2188 }
2189
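/* Fiber link setup using the hardware SG_DIG autoneg block.  On everything
 * except the earliest 5704 revisions it also rewrites MAC_SERDES_CFG around
 * autoneg restarts (the "workaround" path), and it falls back to parallel
 * detection when the link partner never sends config code words.
 */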
2190 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2191 {
2192         u32 sg_dig_ctrl, sg_dig_status;
2193         u32 serdes_cfg, expected_sg_dig_ctrl;
2194         int workaround, port_a;
2195         int current_link_up;
2196
2197         serdes_cfg = 0;
2198         expected_sg_dig_ctrl = 0;
2199         workaround = 0;
2200         port_a = 1;
2201         current_link_up = 0;
2202
2203         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2204             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2205                 workaround = 1;
2206                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2207                         port_a = 0;
2208
2209                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2210                 /* preserve bits 20-23 for voltage regulator */
2211                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2212         }
2213
2214         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2215
2216         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2217                 if (sg_dig_ctrl & (1 << 31)) {
2218                         if (workaround) {
2219                                 u32 val = serdes_cfg;
2220
2221                                 if (port_a)
2222                                         val |= 0xc010000;
2223                                 else
2224                                         val |= 0x4010000;
2225                                 tw32_f(MAC_SERDES_CFG, val);
2226                         }
2227                         tw32_f(SG_DIG_CTRL, 0x01388400);
2228                 }
2229                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2230                         tg3_setup_flow_control(tp, 0, 0);
2231                         current_link_up = 1;
2232                 }
2233                 goto out;
2234         }
2235
2236         /* Want auto-negotiation.  */
2237         expected_sg_dig_ctrl = 0x81388400;
2238
2239         /* Pause capability */
2240         expected_sg_dig_ctrl |= (1 << 11);
2241
2242         /* Asymmetric pause */
2243         expected_sg_dig_ctrl |= (1 << 12);
2244
2245         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2246                 if (workaround)
2247                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2248                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2249                 udelay(5);
2250                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2251
2252                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2253         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2254                                  MAC_STATUS_SIGNAL_DET)) {
2255                 int i;
2256
2257                 /* Give it time to negotiate (~200ms) */
2258                 for (i = 0; i < 40000; i++) {
2259                         sg_dig_status = tr32(SG_DIG_STATUS);
2260                         if (sg_dig_status & (0x3))
2261                                 break;
2262                         udelay(5);
2263                 }
2264                 mac_status = tr32(MAC_STATUS);
2265
2266                 if ((sg_dig_status & (1 << 1)) &&
2267                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2268                         u32 local_adv, remote_adv;
2269
2270                         local_adv = ADVERTISE_PAUSE_CAP;
2271                         remote_adv = 0;
2272                         if (sg_dig_status & (1 << 19))
2273                                 remote_adv |= LPA_PAUSE_CAP;
2274                         if (sg_dig_status & (1 << 20))
2275                                 remote_adv |= LPA_PAUSE_ASYM;
2276
2277                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2278                         current_link_up = 1;
2279                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2280                 } else if (!(sg_dig_status & (1 << 1))) {
2281                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2282                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2283                         else {
2284                                 if (workaround) {
2285                                         u32 val = serdes_cfg;
2286
2287                                         if (port_a)
2288                                                 val |= 0xc010000;
2289                                         else
2290                                                 val |= 0x4010000;
2291
2292                                         tw32_f(MAC_SERDES_CFG, val);
2293                                 }
2294
2295                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2296                                 udelay(40);
2297
2298                                 /* Link parallel detection - link is up
2299                                  * only if we have PCS_SYNC and are not
2300                                  * receiving config code words. */
2301                                 mac_status = tr32(MAC_STATUS);
2302                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2303                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2304                                         tg3_setup_flow_control(tp, 0, 0);
2305                                         current_link_up = 1;
2306                                 }
2307                         }
2308                 }
2309         }
2310
2311 out:
2312         return current_link_up;
2313 }
2314
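/* Software fiber link setup for parts that do not use the SG_DIG block:
 * when autoneg is enabled, run the clause-37 state machine via
 * fiber_autoneg() and fall back to parallel detection if the partner stays
 * silent; when autoneg is disabled, simply force a 1000-FD link.
 */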
2315 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2316 {
2317         int current_link_up = 0;
2318
2319         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2320                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2321                 goto out;
2322         }
2323
2324         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2325                 u32 flags;
2326                 int i;
2327   
2328                 if (fiber_autoneg(tp, &flags)) {
2329                         u32 local_adv, remote_adv;
2330
2331                         local_adv = ADVERTISE_PAUSE_CAP;
2332                         remote_adv = 0;
2333                         if (flags & MR_LP_ADV_SYM_PAUSE)
2334                                 remote_adv |= LPA_PAUSE_CAP;
2335                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2336                                 remote_adv |= LPA_PAUSE_ASYM;
2337
2338                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2339
2340                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2341                         current_link_up = 1;
2342                 }
2343                 for (i = 0; i < 30; i++) {
2344                         udelay(20);
2345                         tw32_f(MAC_STATUS,
2346                                (MAC_STATUS_SYNC_CHANGED |
2347                                 MAC_STATUS_CFG_CHANGED));
2348                         udelay(40);
2349                         if ((tr32(MAC_STATUS) &
2350                              (MAC_STATUS_SYNC_CHANGED |
2351                               MAC_STATUS_CFG_CHANGED)) == 0)
2352                                 break;
2353                 }
2354
2355                 mac_status = tr32(MAC_STATUS);
2356                 if (current_link_up == 0 &&
2357                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2358                     !(mac_status & MAC_STATUS_RCVD_CFG))
2359                         current_link_up = 1;
2360         } else {
2361                 /* Forcing 1000FD link up. */
2362                 current_link_up = 1;
2363                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2364
2365                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2366                 udelay(40);
2367         }
2368
2369 out:
2370         return current_link_up;
2371 }
2372
2373 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2374 {
2375         u32 orig_pause_cfg;
2376         u16 orig_active_speed;
2377         u8 orig_active_duplex;
2378         u32 mac_status;
2379         int current_link_up;
2380         int i;
2381
2382         orig_pause_cfg =
2383                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2384                                   TG3_FLAG_TX_PAUSE));
2385         orig_active_speed = tp->link_config.active_speed;
2386         orig_active_duplex = tp->link_config.active_duplex;
2387
2388         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2389             netif_carrier_ok(tp->dev) &&
2390             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2391                 mac_status = tr32(MAC_STATUS);
2392                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2393                                MAC_STATUS_SIGNAL_DET |
2394                                MAC_STATUS_CFG_CHANGED |
2395                                MAC_STATUS_RCVD_CFG);
2396                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2397                                    MAC_STATUS_SIGNAL_DET)) {
2398                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2399                                             MAC_STATUS_CFG_CHANGED));
2400                         return 0;
2401                 }
2402         }
2403
2404         tw32_f(MAC_TX_AUTO_NEG, 0);
2405
2406         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2407         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2408         tw32_f(MAC_MODE, tp->mac_mode);
2409         udelay(40);
2410
2411         if (tp->phy_id == PHY_ID_BCM8002)
2412                 tg3_init_bcm8002(tp);
2413
2414         /* Enable link change event even when serdes polling.  */
2415         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2416         udelay(40);
2417
2418         current_link_up = 0;
2419         mac_status = tr32(MAC_STATUS);
2420
2421         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2422                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2423         else
2424                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2425
2426         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2427         tw32_f(MAC_MODE, tp->mac_mode);
2428         udelay(40);
2429
2430         tp->hw_status->status =
2431                 (SD_STATUS_UPDATED |
2432                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2433
2434         for (i = 0; i < 100; i++) {
2435                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2436                                     MAC_STATUS_CFG_CHANGED));
2437                 udelay(5);
2438                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2439                                          MAC_STATUS_CFG_CHANGED)) == 0)
2440                         break;
2441         }
2442
2443         mac_status = tr32(MAC_STATUS);
2444         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2445                 current_link_up = 0;
2446                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2447                         tw32_f(MAC_MODE, (tp->mac_mode |
2448                                           MAC_MODE_SEND_CONFIGS));
2449                         udelay(1);
2450                         tw32_f(MAC_MODE, tp->mac_mode);
2451                 }
2452         }
2453
2454         if (current_link_up == 1) {
2455                 tp->link_config.active_speed = SPEED_1000;
2456                 tp->link_config.active_duplex = DUPLEX_FULL;
2457                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2458                                     LED_CTRL_LNKLED_OVERRIDE |
2459                                     LED_CTRL_1000MBPS_ON));
2460         } else {
2461                 tp->link_config.active_speed = SPEED_INVALID;
2462                 tp->link_config.active_duplex = DUPLEX_INVALID;
2463                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2464                                     LED_CTRL_LNKLED_OVERRIDE |
2465                                     LED_CTRL_TRAFFIC_OVERRIDE));
2466         }
2467
2468         if (current_link_up != netif_carrier_ok(tp->dev)) {
2469                 if (current_link_up)
2470                         netif_carrier_on(tp->dev);
2471                 else
2472                         netif_carrier_off(tp->dev);
2473                 tg3_link_report(tp);
2474         } else {
2475                 u32 now_pause_cfg =
2476                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2477                                          TG3_FLAG_TX_PAUSE);
2478                 if (orig_pause_cfg != now_pause_cfg ||
2479                     orig_active_speed != tp->link_config.active_speed ||
2480                     orig_active_duplex != tp->link_config.active_duplex)
2481                         tg3_link_report(tp);
2482         }
2483
2484         return 0;
2485 }
2486
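/* Dispatch to the fiber or copper link setup path, then adjust
 * MAC_TX_LENGTHS (half-duplex gigabit gets a larger slot time, presumably
 * to cover carrier extension) and, on pre-5705 chips, only leave
 * statistics-block coalescing enabled while the link is up.
 */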
2487 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2488 {
2489         int err;
2490
2491         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2492                 err = tg3_setup_fiber_phy(tp, force_reset);
2493         } else {
2494                 err = tg3_setup_copper_phy(tp, force_reset);
2495         }
2496
2497         if (tp->link_config.active_speed == SPEED_1000 &&
2498             tp->link_config.active_duplex == DUPLEX_HALF)
2499                 tw32(MAC_TX_LENGTHS,
2500                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2501                       (6 << TX_LENGTHS_IPG_SHIFT) |
2502                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2503         else
2504                 tw32(MAC_TX_LENGTHS,
2505                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2506                       (6 << TX_LENGTHS_IPG_SHIFT) |
2507                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2508
2509         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2510                 if (netif_carrier_ok(tp->dev)) {
2511                         tw32(HOSTCC_STAT_COAL_TICKS,
2512                              tp->coal.stats_block_coalesce_usecs);
2513                 } else {
2514                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2515                 }
2516         }
2517
2518         return err;
2519 }
2520
2521 /* Tigon3 never reports partial packet sends.  So we do not
2522  * need special logic to handle SKBs that have not had all
2523  * of their frags sent yet, like SunGEM does.
2524  */
2525 static void tg3_tx(struct tg3 *tp)
2526 {
2527         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2528         u32 sw_idx = tp->tx_cons;
2529
2530         while (sw_idx != hw_idx) {
2531                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2532                 struct sk_buff *skb = ri->skb;
2533                 int i;
2534
2535                 if (unlikely(skb == NULL))
2536                         BUG();
2537
2538                 pci_unmap_single(tp->pdev,
2539                                  pci_unmap_addr(ri, mapping),
2540                                  skb_headlen(skb),
2541                                  PCI_DMA_TODEVICE);
2542
2543                 ri->skb = NULL;
2544
2545                 sw_idx = NEXT_TX(sw_idx);
2546
2547                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2548                         if (unlikely(sw_idx == hw_idx))
2549                                 BUG();
2550
2551                         ri = &tp->tx_buffers[sw_idx];
2552                         if (unlikely(ri->skb != NULL))
2553                                 BUG();
2554
2555                         pci_unmap_page(tp->pdev,
2556                                        pci_unmap_addr(ri, mapping),
2557                                        skb_shinfo(skb)->frags[i].size,
2558                                        PCI_DMA_TODEVICE);
2559
2560                         sw_idx = NEXT_TX(sw_idx);
2561                 }
2562
2563                 dev_kfree_skb_irq(skb);
2564         }
2565
2566         tp->tx_cons = sw_idx;
2567
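        /* Only wake the queue once a comfortable number of descriptors
         * (TG3_TX_WAKEUP_THRESH) is free again, rather than on every
         * completed packet, so the queue does not flap between stopped
         * and running.
         */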
2568         if (netif_queue_stopped(tp->dev) &&
2569             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2570                 netif_wake_queue(tp->dev);
2571 }
2572
2573 /* Returns size of skb allocated or < 0 on error.
2574  *
2575  * We only need to fill in the address because the other members
2576  * of the RX descriptor are invariant, see tg3_init_rings.
2577  *
2578  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2579  * posting buffers we only dirty the first cache line of the RX
2580  * descriptor (containing the address), whereas for the RX status
2581  * buffers the cpu only reads the last cache line of the RX descriptor
2582  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2583  */
2584 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2585                             int src_idx, u32 dest_idx_unmasked)
2586 {
2587         struct tg3_rx_buffer_desc *desc;
2588         struct ring_info *map, *src_map;
2589         struct sk_buff *skb;
2590         dma_addr_t mapping;
2591         int skb_size, dest_idx;
2592
2593         src_map = NULL;
2594         switch (opaque_key) {
2595         case RXD_OPAQUE_RING_STD:
2596                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2597                 desc = &tp->rx_std[dest_idx];
2598                 map = &tp->rx_std_buffers[dest_idx];
2599                 if (src_idx >= 0)
2600                         src_map = &tp->rx_std_buffers[src_idx];
2601                 skb_size = RX_PKT_BUF_SZ;
2602                 break;
2603
2604         case RXD_OPAQUE_RING_JUMBO:
2605                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2606                 desc = &tp->rx_jumbo[dest_idx];
2607                 map = &tp->rx_jumbo_buffers[dest_idx];
2608                 if (src_idx >= 0)
2609                         src_map = &tp->rx_jumbo_buffers[src_idx];
2610                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2611                 break;
2612
2613         default:
2614                 return -EINVAL;
2615         }
2616
2617         /* Do not overwrite any of the map or rp information
2618          * until we are sure we can commit to a new buffer.
2619          *
2620          * Callers depend upon this behavior and assume that
2621          * we leave everything unchanged if we fail.
2622          */
2623         skb = dev_alloc_skb(skb_size);
2624         if (skb == NULL)
2625                 return -ENOMEM;
2626
2627         skb->dev = tp->dev;
2628         skb_reserve(skb, tp->rx_offset);
2629
2630         mapping = pci_map_single(tp->pdev, skb->data,
2631                                  skb_size - tp->rx_offset,
2632                                  PCI_DMA_FROMDEVICE);
2633
2634         map->skb = skb;
2635         pci_unmap_addr_set(map, mapping, mapping);
2636
2637         if (src_map != NULL)
2638                 src_map->skb = NULL;
2639
2640         desc->addr_hi = ((u64)mapping >> 32);
2641         desc->addr_lo = ((u64)mapping & 0xffffffff);
2642
2643         return skb_size;
2644 }
2645
2646 /* We only need to move over in the address because the other
2647  * members of the RX descriptor are invariant.  See notes above
2648  * tg3_alloc_rx_skb for full details.
2649  */
2650 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2651                            int src_idx, u32 dest_idx_unmasked)
2652 {
2653         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2654         struct ring_info *src_map, *dest_map;
2655         int dest_idx;
2656
2657         switch (opaque_key) {
2658         case RXD_OPAQUE_RING_STD:
2659                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2660                 dest_desc = &tp->rx_std[dest_idx];
2661                 dest_map = &tp->rx_std_buffers[dest_idx];
2662                 src_desc = &tp->rx_std[src_idx];
2663                 src_map = &tp->rx_std_buffers[src_idx];
2664                 break;
2665
2666         case RXD_OPAQUE_RING_JUMBO:
2667                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2668                 dest_desc = &tp->rx_jumbo[dest_idx];
2669                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2670                 src_desc = &tp->rx_jumbo[src_idx];
2671                 src_map = &tp->rx_jumbo_buffers[src_idx];
2672                 break;
2673
2674         default:
2675                 return;
2676         }
2677
2678         dest_map->skb = src_map->skb;
2679         pci_unmap_addr_set(dest_map, mapping,
2680                            pci_unmap_addr(src_map, mapping));
2681         dest_desc->addr_hi = src_desc->addr_hi;
2682         dest_desc->addr_lo = src_desc->addr_lo;
2683
2684         src_map->skb = NULL;
2685 }
2686
2687 #if TG3_VLAN_TAG_USED
2688 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2689 {
2690         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2691 }
2692 #endif
2693
2694 /* The RX ring scheme is composed of multiple rings which post fresh
2695  * buffers to the chip, and one special ring the chip uses to report
2696  * status back to the host.
2697  *
2698  * The special ring reports the status of received packets to the
2699  * host.  The chip does not write into the original descriptor the
2700  * RX buffer was obtained from.  The chip simply takes the original
2701  * descriptor as provided by the host, updates the status and length
2702  * field, then writes this into the next status ring entry.
2703  *
2704  * Each ring the host uses to post buffers to the chip is described
2705  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2706  * it is first placed into the on-chip RAM.  Once the packet's length
2707  * is known, the chip walks down the TG3_BDINFO entries to select a ring:
2708  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2709  * whose MAXLEN covers the new packet's length is chosen.
2710  *
2711  * The "separate ring for rx status" scheme may sound odd, but it makes
2712  * sense from a cache coherency perspective.  If only the host writes
2713  * to the buffer post rings, and only the chip writes to the rx status
2714  * rings, then cache lines never move beyond shared-modified state.
2715  * If both the host and chip were to write into the same ring, cache line
2716  * eviction could occur since both entities want it in an exclusive state.
2717  */
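/* tg3_rx() below is the consumer side of this scheme: it walks the status
 * ring until it catches up with the hardware's producer index, recycles or
 * replaces each drained data buffer, and hands the packets to the stack
 * within the caller's budget.
 */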
2718 static int tg3_rx(struct tg3 *tp, int budget)
2719 {
2720         u32 work_mask;
2721         u32 sw_idx = tp->rx_rcb_ptr;
2722         u16 hw_idx;
2723         int received;
2724
2725         hw_idx = tp->hw_status->idx[0].rx_producer;
2726         /*
2727          * We need to order the read of hw_idx and the read of
2728          * the opaque cookie.
2729          */
2730         rmb();
2731         work_mask = 0;
2732         received = 0;
2733         while (sw_idx != hw_idx && budget > 0) {
2734                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2735                 unsigned int len;
2736                 struct sk_buff *skb;
2737                 dma_addr_t dma_addr;
2738                 u32 opaque_key, desc_idx, *post_ptr;
2739
2740                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2741                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2742                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2743                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2744                                                   mapping);
2745                         skb = tp->rx_std_buffers[desc_idx].skb;
2746                         post_ptr = &tp->rx_std_ptr;
2747                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2748                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2749                                                   mapping);
2750                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2751                         post_ptr = &tp->rx_jumbo_ptr;
2752                 }
2753                 else {
2754                         goto next_pkt_nopost;
2755                 }
2756
2757                 work_mask |= opaque_key;
2758
2759                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2760                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2761                 drop_it:
2762                         tg3_recycle_rx(tp, opaque_key,
2763                                        desc_idx, *post_ptr);
2764                 drop_it_no_recycle:
2765                         /* Other statistics are kept track of by the card. */
2766                         tp->net_stats.rx_dropped++;
2767                         goto next_pkt;
2768                 }
2769
2770                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2771
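                /* Copy-break note (descriptive only): big frames keep the
                 * mapped DMA buffer and a fresh skb is posted in its place;
                 * small frames are copied into a new skb so the original
                 * buffer can simply be recycled.
                 */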
2772                 if (len > RX_COPY_THRESHOLD &&
2773                     tp->rx_offset == 2
2774                         /* rx_offset != 2 iff this is a 5701 card running
2775                          * in PCI-X mode [see tg3_get_invariants()] */
2776                 ) {
2777                         int skb_size;
2778
2779                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2780                                                     desc_idx, *post_ptr);
2781                         if (skb_size < 0)
2782                                 goto drop_it;
2783
2784                         pci_unmap_single(tp->pdev, dma_addr,
2785                                          skb_size - tp->rx_offset,
2786                                          PCI_DMA_FROMDEVICE);
2787
2788                         skb_put(skb, len);
2789                 } else {
2790                         struct sk_buff *copy_skb;
2791
2792                         tg3_recycle_rx(tp, opaque_key,
2793                                        desc_idx, *post_ptr);
2794
2795                         copy_skb = dev_alloc_skb(len + 2);
2796                         if (copy_skb == NULL)
2797                                 goto drop_it_no_recycle;
2798
2799                         copy_skb->dev = tp->dev;
2800                         skb_reserve(copy_skb, 2);
2801                         skb_put(copy_skb, len);
2802                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2803                         memcpy(copy_skb->data, skb->data, len);
2804                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2805
2806                         /* We'll reuse the original ring buffer. */
2807                         skb = copy_skb;
2808                 }
2809
2810                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2811                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2812                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2813                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2814                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2815                 else
2816                         skb->ip_summed = CHECKSUM_NONE;
2817
2818                 skb->protocol = eth_type_trans(skb, tp->dev);
2819 #if TG3_VLAN_TAG_USED
2820                 if (tp->vlgrp != NULL &&
2821                     desc->type_flags & RXD_FLAG_VLAN) {
2822                         tg3_vlan_rx(tp, skb,
2823                                     desc->err_vlan & RXD_VLAN_MASK);
2824                 } else
2825 #endif
2826                         netif_receive_skb(skb);
2827
2828                 tp->dev->last_rx = jiffies;
2829                 received++;
2830                 budget--;
2831
2832 next_pkt:
2833                 (*post_ptr)++;
2834 next_pkt_nopost:
2835                 sw_idx++;
2836                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2837
2838                 /* Refresh hw_idx to see if there is new work */
2839                 if (sw_idx == hw_idx) {
2840                         hw_idx = tp->hw_status->idx[0].rx_producer;
2841                         rmb();
2842                 }
2843         }
2844
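        /* Note (descriptive only): the mailbox writes below tell the chip how
         * far the host has consumed the return ring, and bump the standard
         * and jumbo producer indices only for the rings actually touched in
         * this pass, as tracked by work_mask.
         */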
2845         /* ACK the status ring. */
2846         tp->rx_rcb_ptr = sw_idx;
2847         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2848
2849         /* Refill RX ring(s). */
2850         if (work_mask & RXD_OPAQUE_RING_STD) {
2851                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2852                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2853                              sw_idx);
2854         }
2855         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2856                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2857                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2858                              sw_idx);
2859         }
2860         mmiowb();
2861
2862         return received;
2863 }
2864
2865 static int tg3_poll(struct net_device *netdev, int *budget)
2866 {
2867         struct tg3 *tp = netdev_priv(netdev);
2868         struct tg3_hw_status *sblk = tp->hw_status;
2869         unsigned long flags;
2870         int done;
2871
2872         spin_lock_irqsave(&tp->lock, flags);
2873
2874         /* handle link change and other phy events */
2875         if (!(tp->tg3_flags &
2876               (TG3_FLAG_USE_LINKCHG_REG |
2877                TG3_FLAG_POLL_SERDES))) {
2878                 if (sblk->status & SD_STATUS_LINK_CHG) {
2879                         sblk->status = SD_STATUS_UPDATED |
2880                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2881                         tg3_setup_phy(tp, 0);
2882                 }
2883         }
2884
2885         /* run TX completion thread */
2886         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2887                 spin_lock(&tp->tx_lock);
2888                 tg3_tx(tp);
2889                 spin_unlock(&tp->tx_lock);
2890         }
2891
2892         spin_unlock_irqrestore(&tp->lock, flags);
2893
2894         /* run RX thread, within the bounds set by NAPI.
2895          * All RX "locking" is done by ensuring outside
2896          * code synchronizes with dev->poll()
2897          */
2898         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2899                 int orig_budget = *budget;
2900                 int work_done;
2901
2902                 if (orig_budget > netdev->quota)
2903                         orig_budget = netdev->quota;
2904
2905                 work_done = tg3_rx(tp, orig_budget);
2906
2907                 *budget -= work_done;
2908                 netdev->quota -= work_done;
2909         }
2910
2911         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
2912                 tp->last_tag = sblk->status_tag;
2913         rmb();
2914
2915         /* if no more work, tell net stack and NIC we're done */
2916         done = !tg3_has_work(tp);
2917         if (done) {
2918                 spin_lock_irqsave(&tp->lock, flags);
2919                 __netif_rx_complete(netdev);
2920                 tg3_restart_ints(tp);
2921                 spin_unlock_irqrestore(&tp->lock, flags);
2922         }
2923
2924         return (done ? 0 : 1);
2925 }
2926
2927 /* MSI ISR - No need to check for interrupt sharing and no need to
2928  * flush status block and interrupt mailbox. PCI ordering rules
2929  * guarantee that MSI will arrive after the status block.
2930  */
2931 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2932 {
2933         struct net_device *dev = dev_id;
2934         struct tg3 *tp = netdev_priv(dev);
2935         struct tg3_hw_status *sblk = tp->hw_status;
2936         unsigned long flags;
2937
2938         spin_lock_irqsave(&tp->lock, flags);
2939
2940         /*
2941          * Writing any value to intr-mbox-0 clears PCI INTA# and
2942          * chip-internal interrupt pending events.
2943          * Writing non-zero to intr-mbox-0 additionally tells the
2944          * NIC to stop sending us irqs, engaging "in-intr-handler"
2945          * event coalescing.
2946          */
2947         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2948         tp->last_tag = sblk->status_tag;
2949         sblk->status &= ~SD_STATUS_UPDATED;
2950         if (likely(tg3_has_work(tp)))
2951                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2952         else {
2953                 /* No work, re-enable interrupts.  */
2954                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2955                              tp->last_tag << 24);
2956         }
2957
2958         spin_unlock_irqrestore(&tp->lock, flags);
2959
2960         return IRQ_RETVAL(1);
2961 }
2962
2963 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2964 {
2965         struct net_device *dev = dev_id;
2966         struct tg3 *tp = netdev_priv(dev);
2967         struct tg3_hw_status *sblk = tp->hw_status;
2968         unsigned long flags;
2969         unsigned int handled = 1;
2970
2971         spin_lock_irqsave(&tp->lock, flags);
2972
2973         /* In INTx mode, it is possible for the interrupt to arrive at
2974          * the CPU before the status block posted prior to the interrupt.
2975          * Reading the PCI State register will confirm whether the
2976          * interrupt is ours and will flush the status block.
2977          */
2978         if ((sblk->status & SD_STATUS_UPDATED) ||
2979             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2980                 /*
2981                  * Writing any value to intr-mbox-0 clears PCI INTA# and
2982                  * chip-internal interrupt pending events.
2983                  * Writing non-zero to intr-mbox-0 additionally tells the
2984                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2985                  * event coalescing.
2986                  */
2987                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2988                              0x00000001);
2989                 sblk->status &= ~SD_STATUS_UPDATED;
2990                 if (likely(tg3_has_work(tp)))
2991                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2992                 else {
2993                         /* No work, shared interrupt perhaps?  re-enable
2994                          * interrupts, and flush that PCI write
2995                          */
2996                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2997                                 0x00000000);
2998                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2999                 }
3000         } else {        /* shared interrupt */
3001                 handled = 0;
3002         }
3003
3004         spin_unlock_irqrestore(&tp->lock, flags);
3005
3006         return IRQ_RETVAL(handled);
3007 }
3008
3009 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3010 {
3011         struct net_device *dev = dev_id;
3012         struct tg3 *tp = netdev_priv(dev);
3013         struct tg3_hw_status *sblk = tp->hw_status;
3014         unsigned long flags;
3015         unsigned int handled = 1;
3016
3017         spin_lock_irqsave(&tp->lock, flags);
3018
3019         /* In INTx mode, it is possible for the interrupt to arrive at
3020          * the CPU before the status block posted prior to the interrupt.
3021          * Reading the PCI State register will confirm whether the
3022          * interrupt is ours and will flush the status block.
3023          */
3024         if ((sblk->status & SD_STATUS_UPDATED) ||
3025             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3026                 /*
3027                  * writing any value to intr-mbox-0 clears PCI INTA# and
3028                  * chip-internal interrupt pending events.
3029                  * writing non-zero to intr-mbox-0 additionally tells the
3030                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3031                  * event coalescing.
3032                  */
3033                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3034                              0x00000001);
3035                 tp->last_tag = sblk->status_tag;
3036                 sblk->status &= ~SD_STATUS_UPDATED;
3037                 if (likely(tg3_has_work(tp)))
3038                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3039                 else {
3040                         /* no work, shared interrupt perhaps?  re-enable
3041                          * interrupts, and flush that PCI write
3042                          */
3043                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3044                                      tp->last_tag << 24);
3045                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3046                 }
3047         } else {        /* shared interrupt */
3048                 handled = 0;
3049         }
3050
3051         spin_unlock_irqrestore(&tp->lock, flags);
3052
3053         return IRQ_RETVAL(handled);
3054 }
3055
3056 /* ISR for interrupt test */
3057 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3058                 struct pt_regs *regs)
3059 {
3060         struct net_device *dev = dev_id;
3061         struct tg3 *tp = netdev_priv(dev);
3062         struct tg3_hw_status *sblk = tp->hw_status;
3063
3064         if (sblk->status & SD_STATUS_UPDATED) {
3065                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3066                              0x00000001);
3067                 return IRQ_RETVAL(1);
3068         }
3069         return IRQ_RETVAL(0);
3070 }
3071
3072 static int tg3_init_hw(struct tg3 *);
3073 static int tg3_halt(struct tg3 *, int);
3074
3075 #ifdef CONFIG_NET_POLL_CONTROLLER
3076 static void tg3_poll_controller(struct net_device *dev)
3077 {
3078         struct tg3 *tp = netdev_priv(dev);
3079
3080         tg3_interrupt(tp->pdev->irq, dev, NULL);
3081 }
3082 #endif
3083
3084 static void tg3_reset_task(void *_data)
3085 {
3086         struct tg3 *tp = _data;
3087         unsigned int restart_timer;
3088
3089         tg3_netif_stop(tp);
3090
3091         spin_lock_irq(&tp->lock);
3092         spin_lock(&tp->tx_lock);
3093
3094         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3095         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3096
3097         tg3_halt(tp, 0);
3098         tg3_init_hw(tp);
3099
3100         tg3_netif_start(tp);
3101
3102         spin_unlock(&tp->tx_lock);
3103         spin_unlock_irq(&tp->lock);
3104
3105         if (restart_timer)
3106                 mod_timer(&tp->timer, jiffies + 1);
3107 }
3108
3109 static void tg3_tx_timeout(struct net_device *dev)
3110 {
3111         struct tg3 *tp = netdev_priv(dev);
3112
3113         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3114                dev->name);
3115
3116         schedule_work(&tp->reset_task);
3117 }
3118
3119 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3120
3121 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3122                                        u32 guilty_entry, int guilty_len,
3123                                        u32 last_plus_one, u32 *start, u32 mss)
3124 {
3125         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3126         dma_addr_t new_addr;
3127         u32 entry = *start;
3128         int i;
3129
3130         if (!new_skb) {
3131                 dev_kfree_skb(skb);
3132                 return -1;
3133         }
3134
3135         /* New SKB is guaranteed to be linear. */
3136         entry = *start;
3137         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3138                                   PCI_DMA_TODEVICE);
3139         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3140                     (skb->ip_summed == CHECKSUM_HW) ?
3141                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3142         *start = NEXT_TX(entry);
3143
3144         /* Now clean up the sw ring entries. */
3145         i = 0;
3146         while (entry != last_plus_one) {
3147                 int len;
3148
3149                 if (i == 0)
3150                         len = skb_headlen(skb);
3151                 else
3152                         len = skb_shinfo(skb)->frags[i-1].size;
3153                 pci_unmap_single(tp->pdev,
3154                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3155                                  len, PCI_DMA_TODEVICE);
3156                 if (i == 0) {
3157                         tp->tx_buffers[entry].skb = new_skb;
3158                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3159                 } else {
3160                         tp->tx_buffers[entry].skb = NULL;
3161                 }
3162                 entry = NEXT_TX(entry);
3163                 i++;
3164         }
3165
3166         dev_kfree_skb(skb);
3167
3168         return 0;
3169 }
3170
3171 static void tg3_set_txd(struct tg3 *tp, int entry,
3172                         dma_addr_t mapping, int len, u32 flags,
3173                         u32 mss_and_is_end)
3174 {
3175         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3176         int is_end = (mss_and_is_end & 0x1);
3177         u32 mss = (mss_and_is_end >> 1);
3178         u32 vlan_tag = 0;
3179
3180         if (is_end)
3181                 flags |= TXD_FLAG_END;
3182         if (flags & TXD_FLAG_VLAN) {
3183                 vlan_tag = flags >> 16;
3184                 flags &= 0xffff;
3185         }
3186         vlan_tag |= (mss << TXD_MSS_SHIFT);
3187
3188         txd->addr_hi = ((u64) mapping >> 32);
3189         txd->addr_lo = ((u64) mapping & 0xffffffff);
3190         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3191         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3192 }
3193
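/* Note (descriptive, not from the hardware manual): some Tigon3 revisions
 * cannot DMA a buffer whose low 32 address bits wrap across a 4GB boundary.
 * The helper below flags such a mapping (with an 8-byte guard) so that
 * tg3_start_xmit() can fall back to tigon3_4gb_hwbug_workaround(), which
 * resends the frame from a freshly copied linear skb.
 */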
3194 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3195 {
3196         u32 base = (u32) mapping & 0xffffffff;
3197
3198         return ((base > 0xffffdcc0) &&
3199                 (base + len + 8 < base));
3200 }
3201
3202 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3203 {
3204         struct tg3 *tp = netdev_priv(dev);
3205         dma_addr_t mapping;
3206         unsigned int i;
3207         u32 len, entry, base_flags, mss;
3208         int would_hit_hwbug;
3209         unsigned long flags;
3210
3211         len = skb_headlen(skb);
3212
3213         /* No BH disabling for tx_lock here.  We are running in BH disabled
3214          * context and TX reclaim runs via tp->poll inside of a software
3215          * interrupt.  Rejoice!
3216          *
3217          * Actually, things are not so simple.  If we are to take a hw
3218          * IRQ here, we can deadlock, consider:
3219          *
3220          *       CPU1           CPU2
3221          *   tg3_start_xmit
3222          *   take tp->tx_lock
3223          *                      tg3_timer
3224          *                      take tp->lock
3225          *   tg3_interrupt
3226          *   spin on tp->lock
3227          *                      spin on tp->tx_lock
3228          *
3229          * So we really do need to disable interrupts when taking
3230          * tx_lock here.
3231          */
3232         local_irq_save(flags);
3233         if (!spin_trylock(&tp->tx_lock)) { 
3234                 local_irq_restore(flags);
3235                 return NETDEV_TX_LOCKED; 
3236         } 
3237
3238         /* This is a hard error, log it. */
3239         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3240                 netif_stop_queue(dev);
3241                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3242                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3243                        dev->name);
3244                 return NETDEV_TX_BUSY;
3245         }
3246
3247         entry = tp->tx_prod;
3248         base_flags = 0;
3249         if (skb->ip_summed == CHECKSUM_HW)
3250                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
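        /* TSO note (descriptive only): for a TSO frame the IP total length is
         * rewritten to the per-segment size and, unless the chip does full
         * hardware TSO, the TCP checksum is seeded with the pseudo-header so
         * the hardware can complete it for each generated segment.
         */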
3251 #if TG3_TSO_SUPPORT != 0
3252         mss = 0;
3253         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3254             (mss = skb_shinfo(skb)->tso_size) != 0) {
3255                 int tcp_opt_len, ip_tcp_len;
3256
3257                 if (skb_header_cloned(skb) &&
3258                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3259                         dev_kfree_skb(skb);
3260                         goto out_unlock;
3261                 }
3262
3263                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3264                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3265
3266                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3267                                TXD_FLAG_CPU_POST_DMA);
3268
3269                 skb->nh.iph->check = 0;
3270                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3271                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3272                         skb->h.th->check = 0;
3273                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3274                 }
3275                 else {
3276                         skb->h.th->check =
3277                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3278                                                    skb->nh.iph->daddr,
3279                                                    0, IPPROTO_TCP, 0);
3280                 }
3281
3282                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3283                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3284                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3285                                 int tsflags;
3286
3287                                 tsflags = ((skb->nh.iph->ihl - 5) +
3288                                            (tcp_opt_len >> 2));
3289                                 mss |= (tsflags << 11);
3290                         }
3291                 } else {
3292                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3293                                 int tsflags;
3294
3295                                 tsflags = ((skb->nh.iph->ihl - 5) +
3296                                            (tcp_opt_len >> 2));
3297                                 base_flags |= tsflags << 12;
3298                         }
3299                 }
3300         }
3301 #else
3302         mss = 0;
3303 #endif
3304 #if TG3_VLAN_TAG_USED
3305         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3306                 base_flags |= (TXD_FLAG_VLAN |
3307                                (vlan_tx_tag_get(skb) << 16));
3308 #endif
3309
3310         /* Queue skb data, a.k.a. the main skb fragment. */
3311         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3312
3313         tp->tx_buffers[entry].skb = skb;
3314         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3315
3316         would_hit_hwbug = 0;
3317
3318         if (tg3_4g_overflow_test(mapping, len))
3319                 would_hit_hwbug = entry + 1;
3320
3321         tg3_set_txd(tp, entry, mapping, len, base_flags,
3322                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3323
3324         entry = NEXT_TX(entry);
3325
3326         /* Now loop through additional data fragments, and queue them. */
3327         if (skb_shinfo(skb)->nr_frags > 0) {
3328                 unsigned int i, last;
3329
3330                 last = skb_shinfo(skb)->nr_frags - 1;
3331                 for (i = 0; i <= last; i++) {
3332                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3333
3334                         len = frag->size;
3335                         mapping = pci_map_page(tp->pdev,
3336                                                frag->page,
3337                                                frag->page_offset,
3338                                                len, PCI_DMA_TODEVICE);
3339
3340                         tp->tx_buffers[entry].skb = NULL;
3341                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3342
3343                         if (tg3_4g_overflow_test(mapping, len)) {
3344                                 /* Only one should match. */
3345                                 if (would_hit_hwbug)
3346                                         BUG();
3347                                 would_hit_hwbug = entry + 1;
3348                         }
3349
3350                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3351                                 tg3_set_txd(tp, entry, mapping, len,
3352                                             base_flags, (i == last)|(mss << 1));
3353                         else
3354                                 tg3_set_txd(tp, entry, mapping, len,
3355                                             base_flags, (i == last));
3356
3357                         entry = NEXT_TX(entry);
3358                 }
3359         }
3360
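        /* Note (descriptive only): would_hit_hwbug holds (ring index + 1) of
         * the descriptor whose mapping crosses the 4GB DMA boundary.  The
         * block below walks back to the start of the frame, locates that
         * descriptor, and hands the whole frame to
         * tigon3_4gb_hwbug_workaround(), which resends it from a copied,
         * linear skb.
         */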
3361         if (would_hit_hwbug) {
3362                 u32 last_plus_one = entry;
3363                 u32 start;
3364                 unsigned int len = 0;
3365
3366                 would_hit_hwbug -= 1;
3367                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3368                 entry &= (TG3_TX_RING_SIZE - 1);
3369                 start = entry;
3370                 i = 0;
3371                 while (entry != last_plus_one) {
3372                         if (i == 0)
3373                                 len = skb_headlen(skb);
3374                         else
3375                                 len = skb_shinfo(skb)->frags[i-1].size;
3376
3377                         if (entry == would_hit_hwbug)
3378                                 break;
3379
3380                         i++;
3381                         entry = NEXT_TX(entry);
3382
3383                 }
3384
3385                 /* If the workaround fails due to memory/mapping
3386                  * failure, silently drop this packet.
3387                  */
3388                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3389                                                 entry, len,
3390                                                 last_plus_one,
3391                                                 &start, mss))
3392                         goto out_unlock;
3393
3394                 entry = start;
3395         }
3396
3397         /* Packets are ready, update Tx producer idx local and on card. */
3398         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3399
3400         tp->tx_prod = entry;
3401         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3402                 netif_stop_queue(dev);
3403
3404 out_unlock:
3405         mmiowb();
3406         spin_unlock_irqrestore(&tp->tx_lock, flags);
3407
3408         dev->trans_start = jiffies;
3409
3410         return NETDEV_TX_OK;
3411 }
3412
3413 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3414                                int new_mtu)
3415 {
3416         dev->mtu = new_mtu;
3417
3418         if (new_mtu > ETH_DATA_LEN)
3419                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3420         else
3421                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3422 }
3423
3424 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3425 {
3426         struct tg3 *tp = netdev_priv(dev);
3427
3428         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3429                 return -EINVAL;
3430
3431         if (!netif_running(dev)) {
3432                 /* We'll just catch it later when the
3433                  * device is up'd.
3434                  */
3435                 tg3_set_mtu(dev, tp, new_mtu);
3436                 return 0;
3437         }
3438
3439         tg3_netif_stop(tp);
3440         spin_lock_irq(&tp->lock);
3441         spin_lock(&tp->tx_lock);
3442
3443         tg3_halt(tp, 1);
3444
3445         tg3_set_mtu(dev, tp, new_mtu);
3446
3447         tg3_init_hw(tp);
3448
3449         tg3_netif_start(tp);
3450
3451         spin_unlock(&tp->tx_lock);
3452         spin_unlock_irq(&tp->lock);
3453
3454         return 0;
3455 }
3456
3457 /* Free up pending packets in all rx/tx rings.
3458  *
3459  * The chip has been shut down and the driver detached from
3460  * the networking, so no interrupts or new tx packets will
3461  * end up in the driver.  tp->{tx,}lock is not held and we are not
3462  * in an interrupt context and thus may sleep.
3463  */
3464 static void tg3_free_rings(struct tg3 *tp)
3465 {
3466         struct ring_info *rxp;
3467         int i;
3468
3469         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3470                 rxp = &tp->rx_std_buffers[i];
3471
3472                 if (rxp->skb == NULL)
3473                         continue;
3474                 pci_unmap_single(tp->pdev,
3475                                  pci_unmap_addr(rxp, mapping),
3476                                  RX_PKT_BUF_SZ - tp->rx_offset,
3477                                  PCI_DMA_FROMDEVICE);
3478                 dev_kfree_skb_any(rxp->skb);
3479                 rxp->skb = NULL;
3480         }
3481
3482         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3483                 rxp = &tp->rx_jumbo_buffers[i];
3484
3485                 if (rxp->skb == NULL)
3486                         continue;
3487                 pci_unmap_single(tp->pdev,
3488                                  pci_unmap_addr(rxp, mapping),
3489                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3490                                  PCI_DMA_FROMDEVICE);
3491                 dev_kfree_skb_any(rxp->skb);
3492                 rxp->skb = NULL;
3493         }
3494
3495         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3496                 struct tx_ring_info *txp;
3497                 struct sk_buff *skb;
3498                 int j;
3499
3500                 txp = &tp->tx_buffers[i];
3501                 skb = txp->skb;
3502
3503                 if (skb == NULL) {
3504                         i++;
3505                         continue;
3506                 }
3507
3508                 pci_unmap_single(tp->pdev,
3509                                  pci_unmap_addr(txp, mapping),
3510                                  skb_headlen(skb),
3511                                  PCI_DMA_TODEVICE);
3512                 txp->skb = NULL;
3513
3514                 i++;
3515
3516                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3517                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3518                         pci_unmap_page(tp->pdev,
3519                                        pci_unmap_addr(txp, mapping),
3520                                        skb_shinfo(skb)->frags[j].size,
3521                                        PCI_DMA_TODEVICE);
3522                         i++;
3523                 }
3524
3525                 dev_kfree_skb_any(skb);
3526         }
3527 }
3528
3529 /* Initialize tx/rx rings for packet processing.
3530  *
3531  * The chip has been shut down and the driver detached from
3532  * the networking, so no interrupts or new tx packets will
3533  * end up in the driver.  tp->{tx,}lock are held and thus
3534  * we may not sleep.
3535  */
3536 static void tg3_init_rings(struct tg3 *tp)
3537 {
3538         u32 i;
3539
3540         /* Free up all the SKBs. */
3541         tg3_free_rings(tp);
3542
3543         /* Zero out all descriptors. */
3544         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3545         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3546         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3547         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3548
3549         /* Initialize invariants of the rings; we only set this
3550          * stuff once.  This works because the card does not
3551          * write into the rx buffer posting rings.
3552          */
3553         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3554                 struct tg3_rx_buffer_desc *rxd;
3555
3556                 rxd = &tp->rx_std[i];
3557                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3558                         << RXD_LEN_SHIFT;
3559                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3560                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3561                                (i << RXD_OPAQUE_INDEX_SHIFT));
3562         }
3563
3564         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3565                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3566                         struct tg3_rx_buffer_desc *rxd;
3567
3568                         rxd = &tp->rx_jumbo[i];
3569                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3570                                 << RXD_LEN_SHIFT;
3571                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3572                                 RXD_FLAG_JUMBO;
3573                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3574                                (i << RXD_OPAQUE_INDEX_SHIFT));
3575                 }
3576         }
3577
3578         /* Now allocate fresh SKBs for each rx ring. */
3579         for (i = 0; i < tp->rx_pending; i++) {
3580                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3581                                      -1, i) < 0)
3582                         break;
3583         }
3584
3585         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3586                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3587                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3588                                              -1, i) < 0)
3589                                 break;
3590                 }
3591         }
3592 }
3593
3594 /*
3595  * Must not be invoked with interrupt sources disabled and
3596  * the hardware shutdown down.
3597  * the hardware shut down.
3598 static void tg3_free_consistent(struct tg3 *tp)
3599 {
3600         if (tp->rx_std_buffers) {
3601                 kfree(tp->rx_std_buffers);
3602                 tp->rx_std_buffers = NULL;
3603         }
3604         if (tp->rx_std) {
3605                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3606                                     tp->rx_std, tp->rx_std_mapping);
3607                 tp->rx_std = NULL;
3608         }
3609         if (tp->rx_jumbo) {
3610                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3611                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3612                 tp->rx_jumbo = NULL;
3613         }
3614         if (tp->rx_rcb) {
3615                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3616                                     tp->rx_rcb, tp->rx_rcb_mapping);
3617                 tp->rx_rcb = NULL;
3618         }
3619         if (tp->tx_ring) {
3620                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3621                         tp->tx_ring, tp->tx_desc_mapping);
3622                 tp->tx_ring = NULL;
3623         }
3624         if (tp->hw_status) {
3625                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3626                                     tp->hw_status, tp->status_mapping);
3627                 tp->hw_status = NULL;
3628         }
3629         if (tp->hw_stats) {
3630                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3631                                     tp->hw_stats, tp->stats_mapping);
3632                 tp->hw_stats = NULL;
3633         }
3634 }
3635
3636 /*
3637  * Must not be invoked with interrupt sources disabled and
3638  * the hardware shut down.  Can sleep.
3639  */
3640 static int tg3_alloc_consistent(struct tg3 *tp)
3641 {
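        /* Note (descriptive only): one kmalloc() backs three adjacent arrays:
         * TG3_RX_RING_SIZE standard ring_info entries, TG3_RX_JUMBO_RING_SIZE
         * jumbo ring_info entries, and TG3_TX_RING_SIZE tx_ring_info entries.
         * rx_jumbo_buffers and tx_buffers are pointed into this block below.
         */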
3642         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3643                                       (TG3_RX_RING_SIZE +
3644                                        TG3_RX_JUMBO_RING_SIZE)) +
3645                                      (sizeof(struct tx_ring_info) *
3646                                       TG3_TX_RING_SIZE),
3647                                      GFP_KERNEL);
3648         if (!tp->rx_std_buffers)
3649                 return -ENOMEM;
3650
3651         memset(tp->rx_std_buffers, 0,
3652                (sizeof(struct ring_info) *
3653                 (TG3_RX_RING_SIZE +
3654                  TG3_RX_JUMBO_RING_SIZE)) +
3655                (sizeof(struct tx_ring_info) *
3656                 TG3_TX_RING_SIZE));
3657
3658         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3659         tp->tx_buffers = (struct tx_ring_info *)
3660                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3661
3662         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3663                                           &tp->rx_std_mapping);
3664         if (!tp->rx_std)
3665                 goto err_out;
3666
3667         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3668                                             &tp->rx_jumbo_mapping);
3669
3670         if (!tp->rx_jumbo)
3671                 goto err_out;
3672
3673         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3674                                           &tp->rx_rcb_mapping);
3675         if (!tp->rx_rcb)
3676                 goto err_out;
3677
3678         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3679                                            &tp->tx_desc_mapping);
3680         if (!tp->tx_ring)
3681                 goto err_out;
3682
3683         tp->hw_status = pci_alloc_consistent(tp->pdev,
3684                                              TG3_HW_STATUS_SIZE,
3685                                              &tp->status_mapping);
3686         if (!tp->hw_status)
3687                 goto err_out;
3688
3689         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3690                                             sizeof(struct tg3_hw_stats),
3691                                             &tp->stats_mapping);
3692         if (!tp->hw_stats)
3693                 goto err_out;
3694
3695         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3696         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3697
3698         return 0;
3699
3700 err_out:
3701         tg3_free_consistent(tp);
3702         return -ENOMEM;
3703 }
3704
3705 #define MAX_WAIT_CNT 1000
3706
3707 /* To stop a block, clear the enable bit and poll till it
3708  * clears.  tp->lock is held.
3709  */
3710 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3711 {
3712         unsigned int i;
3713         u32 val;
3714
3715         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3716                 switch (ofs) {
3717                 case RCVLSC_MODE:
3718                 case DMAC_MODE:
3719                 case MBFREE_MODE:
3720                 case BUFMGR_MODE:
3721                 case MEMARB_MODE:
3722                         /* We can't enable/disable these bits of the
3723                          * 5705/5750, just say success.
3724                          */
3725                         return 0;
3726
3727                 default:
3728                         break;
3729                 };
3730         }
3731
3732         val = tr32(ofs);
3733         val &= ~enable_bit;
3734         tw32_f(ofs, val);
3735
3736         for (i = 0; i < MAX_WAIT_CNT; i++) {
3737                 udelay(100);
3738                 val = tr32(ofs);
3739                 if ((val & enable_bit) == 0)
3740                         break;
3741         }
3742
3743         if (i == MAX_WAIT_CNT && !silent) {
3744                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3745                        "ofs=%lx enable_bit=%x\n",
3746                        ofs, enable_bit);
3747                 return -ENODEV;
3748         }
3749
3750         return 0;
3751 }
3752
3753 /* tp->lock is held. */
3754 static int tg3_abort_hw(struct tg3 *tp, int silent)
3755 {
3756         int i, err;
3757
3758         tg3_disable_ints(tp);
3759
3760         tp->rx_mode &= ~RX_MODE_ENABLE;
3761         tw32_f(MAC_RX_MODE, tp->rx_mode);
3762         udelay(10);
3763
3764         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
3765         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
3766         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
3767         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
3768         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
3769         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
3770
3771         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
3772         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
3773         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
3774         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
3775         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
3776         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
3777         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
3778
3779         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3780         tw32_f(MAC_MODE, tp->mac_mode);
3781         udelay(40);
3782
3783         tp->tx_mode &= ~TX_MODE_ENABLE;
3784         tw32_f(MAC_TX_MODE, tp->tx_mode);
3785
3786         for (i = 0; i < MAX_WAIT_CNT; i++) {
3787                 udelay(100);
3788                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3789                         break;
3790         }
3791         if (i >= MAX_WAIT_CNT) {
3792                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3793                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3794                        tp->dev->name, tr32(MAC_TX_MODE));
3795                 err |= -ENODEV;
3796         }
3797
3798         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
3799         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
3800         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
3801
3802         tw32(FTQ_RESET, 0xffffffff);
3803         tw32(FTQ_RESET, 0x00000000);
3804
3805         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
3806         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
3807
3808         if (tp->hw_status)
3809                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3810         if (tp->hw_stats)
3811                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3812
3813         return err;
3814 }
3815
3816 /* tp->lock is held. */
3817 static int tg3_nvram_lock(struct tg3 *tp)
3818 {
3819         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3820                 int i;
3821
3822                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3823                 for (i = 0; i < 8000; i++) {
3824                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3825                                 break;
3826                         udelay(20);
3827                 }
3828                 if (i == 8000)
3829                         return -ENODEV;
3830         }
3831         return 0;
3832 }
3833
3834 /* tp->lock is held. */
3835 static void tg3_nvram_unlock(struct tg3 *tp)
3836 {
3837         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3838                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3839 }
3840
3841 /* tp->lock is held. */
3842 static void tg3_enable_nvram_access(struct tg3 *tp)
3843 {
3844         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3845             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3846                 u32 nvaccess = tr32(NVRAM_ACCESS);
3847
3848                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3849         }
3850 }
3851
3852 /* tp->lock is held. */
3853 static void tg3_disable_nvram_access(struct tg3 *tp)
3854 {
3855         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3856             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3857                 u32 nvaccess = tr32(NVRAM_ACCESS);
3858
3859                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3860         }
3861 }
3862
3863 /* tp->lock is held. */
3864 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3865 {
3866         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3867                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3868                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3869
3870         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3871                 switch (kind) {
3872                 case RESET_KIND_INIT:
3873                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3874                                       DRV_STATE_START);
3875                         break;
3876
3877                 case RESET_KIND_SHUTDOWN:
3878                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3879                                       DRV_STATE_UNLOAD);
3880                         break;
3881
3882                 case RESET_KIND_SUSPEND:
3883                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3884                                       DRV_STATE_SUSPEND);
3885                         break;
3886
3887                 default:
3888                         break;
3889                 };
3890         }
3891 }
3892
3893 /* tp->lock is held. */
3894 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3895 {
3896         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3897                 switch (kind) {
3898                 case RESET_KIND_INIT:
3899                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3900                                       DRV_STATE_START_DONE);
3901                         break;
3902
3903                 case RESET_KIND_SHUTDOWN:
3904                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3905                                       DRV_STATE_UNLOAD_DONE);
3906                         break;
3907
3908                 default:
3909                         break;
3910                 };
3911         }
3912 }
3913
3914 /* tp->lock is held. */
3915 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3916 {
3917         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3918                 switch (kind) {
3919                 case RESET_KIND_INIT:
3920                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3921                                       DRV_STATE_START);
3922                         break;
3923
3924                 case RESET_KIND_SHUTDOWN:
3925                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3926                                       DRV_STATE_UNLOAD);
3927                         break;
3928
3929                 case RESET_KIND_SUSPEND:
3930                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3931                                       DRV_STATE_SUSPEND);
3932                         break;
3933
3934                 default:
3935                         break;
3936                 };
3937         }
3938 }
3939
3940 static void tg3_stop_fw(struct tg3 *);
3941
3942 /* tp->lock is held. */
3943 static int tg3_chip_reset(struct tg3 *tp)
3944 {
3945         u32 val;
3946         u32 flags_save;
3947         int i;
3948
3949         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3950                 tg3_nvram_lock(tp);
3951
3952         /*
3953          * We must avoid the readl() that normally takes place.
3954          * It locks machines, causes machine checks, and other
3955          * fun things.  So, temporarily disable the 5701
3956          * hardware workaround, while we do the reset.
3957          */
3958         flags_save = tp->tg3_flags;
3959         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3960
3961         /* do the reset */
3962         val = GRC_MISC_CFG_CORECLK_RESET;
3963
3964         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3965                 if (tr32(0x7e2c) == 0x60) {
3966                         tw32(0x7e2c, 0x20);
3967                 }
3968                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3969                         tw32(GRC_MISC_CFG, (1 << 29));
3970                         val |= (1 << 29);
3971                 }
3972         }
3973
3974         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3975                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3976         tw32(GRC_MISC_CFG, val);
3977
3978         /* restore 5701 hardware bug workaround flag */
3979         tp->tg3_flags = flags_save;
3980
3981         /* Unfortunately, we have to delay before the PCI read back.
3982          * Some 575X chips will not even respond to a PCI cfg access
3983          * when the reset command is given to the chip.
3984          *
3985          * How do these hardware designers expect things to work
3986          * properly if the PCI write is posted for a long period
3987          * of time?  It is always necessary to have some method by
3988          * which a register read back can occur to push out the write
3989          * that performs the reset.
3990          *
3991          * For most tg3 variants the trick below was working.
3992          * Ho hum...
3993          */
3994         udelay(120);
3995
3996         /* Flush PCI posted writes.  The normal MMIO registers
3997          * are inaccessible at this time so this is the only
3998          * way to do this reliably (actually, this is no longer
3999          * the case, see above).  I tried to use indirect
4000          * register read/write but this upset some 5701 variants.
4001          */
4002         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4003
4004         udelay(120);
4005
4006         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4007                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4008                         int i;
4009                         u32 cfg_val;
4010
4011                         /* Wait for link training to complete.  */
4012                         for (i = 0; i < 5000; i++)
4013                                 udelay(100);
4014
4015                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4016                         pci_write_config_dword(tp->pdev, 0xc4,
4017                                                cfg_val | (1 << 15));
4018                 }
4019                 /* Set PCIE max payload size and clear error status.  */
4020                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4021         }
4022
4023         /* Re-enable indirect register accesses. */
4024         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4025                                tp->misc_host_ctrl);
4026
4027         /* Set MAX PCI retry to zero. */
4028         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4029         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4030             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4031                 val |= PCISTATE_RETRY_SAME_DMA;
4032         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4033
4034         pci_restore_state(tp->pdev);
4035
4036         /* Make sure PCI-X relaxed ordering bit is clear. */
4037         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4038         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4039         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4040
4041         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4042
4043         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4044                 tg3_stop_fw(tp);
4045                 tw32(0x5000, 0x400);
4046         }
4047
4048         tw32(GRC_MODE, tp->grc_mode);
4049
4050         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4051                 u32 val = tr32(0xc4);
4052
4053                 tw32(0xc4, val | (1 << 15));
4054         }
4055
4056         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4057             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4058                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4059                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4060                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4061                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4062         }
4063
4064         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4065                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4066                 tw32_f(MAC_MODE, tp->mac_mode);
4067         } else
4068                 tw32_f(MAC_MODE, 0);
4069         udelay(40);
4070
4071         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4072                 /* Wait for firmware initialization to complete. */
4073                 for (i = 0; i < 100000; i++) {
4074                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4075                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4076                                 break;
4077                         udelay(10);
4078                 }
4079                 if (i >= 100000) {
4080                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4081                                "firmware will not restart magic=%08x\n",
4082                                tp->dev->name, val);
4083                         return -ENODEV;
4084                 }
4085         }
4086
4087         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4088             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4089                 u32 val = tr32(0x7c00);
4090
4091                 tw32(0x7c00, val | (1 << 25));
4092         }
4093
4094         /* Reprobe ASF enable state.  */
4095         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4096         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4097         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4098         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4099                 u32 nic_cfg;
4100
4101                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4102                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4103                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4104                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4105                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4106                 }
4107         }
4108
4109         return 0;
4110 }
4111
4112 /* tp->lock is held. */
4113 static void tg3_stop_fw(struct tg3 *tp)
4114 {
4115         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4116                 u32 val;
4117                 int i;
4118
4119                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4120                 val = tr32(GRC_RX_CPU_EVENT);
4121                 val |= (1 << 14);
4122                 tw32(GRC_RX_CPU_EVENT, val);
4123
4124                 /* Wait for RX cpu to ACK the event.  */
4125                 for (i = 0; i < 100; i++) {
4126                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4127                                 break;
4128                         udelay(1);
4129                 }
4130         }
4131 }
4132
4133 /* tp->lock is held. */
4134 static int tg3_halt(struct tg3 *tp, int silent)
4135 {
4136         int err;
4137
4138         tg3_stop_fw(tp);
4139
4140         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4141
4142         tg3_abort_hw(tp, silent);
4143         err = tg3_chip_reset(tp);
4144
4145         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4146         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4147
4148         if (err)
4149                 return err;
4150
4151         return 0;
4152 }
4153
4154 #define TG3_FW_RELEASE_MAJOR    0x0
4155 #define TG3_FW_RELASE_MINOR     0x0
4156 #define TG3_FW_RELEASE_FIX      0x0
4157 #define TG3_FW_START_ADDR       0x08000000
4158 #define TG3_FW_TEXT_ADDR        0x08000000
4159 #define TG3_FW_TEXT_LEN         0x9c0
4160 #define TG3_FW_RODATA_ADDR      0x080009c0
4161 #define TG3_FW_RODATA_LEN       0x60
4162 #define TG3_FW_DATA_ADDR        0x08000a40
4163 #define TG3_FW_DATA_LEN         0x20
4164 #define TG3_FW_SBSS_ADDR        0x08000a60
4165 #define TG3_FW_SBSS_LEN         0xc
4166 #define TG3_FW_BSS_ADDR         0x08000a70
4167 #define TG3_FW_BSS_LEN          0x10
4168
4169 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4170         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4171         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4172         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4173         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4174         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4175         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4176         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4177         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4178         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4179         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4180         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4181         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4182         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4183         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4184         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4185         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4186         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4187         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4188         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4189         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4190         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4191         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4192         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4193         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4194         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4195         0, 0, 0, 0, 0, 0,
4196         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4197         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4198         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4199         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4200         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4201         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4202         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4203         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4204         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4205         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4206         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4207         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4208         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4209         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4210         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4211         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4212         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4213         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4214         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4215         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4216         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4217         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4218         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4219         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4220         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4221         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4222         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4223         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4224         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4225         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4226         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4227         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4228         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4229         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4230         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4231         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4232         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4233         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4234         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4235         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4236         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4237         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4238         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4239         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4240         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4241         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4242         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4243         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4244         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4245         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4246         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4247         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4248         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4249         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4250         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4251         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4252         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4253         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4254         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4255         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4256         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4257         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4258         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4259         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4260         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4261 };
4262
4263 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4264         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4265         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4266         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4267         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4268         0x00000000
4269 };
4270
4271 #if 0 /* All zeros, don't eat up space with it. */
4272 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4273         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4274         0x00000000, 0x00000000, 0x00000000, 0x00000000
4275 };
4276 #endif
4277
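/* On-chip scratch memory windows (16KB each) used to hold firmware images
 * loaded into the internal RX and TX CPUs.
 */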
4278 #define RX_CPU_SCRATCH_BASE     0x30000
4279 #define RX_CPU_SCRATCH_SIZE     0x04000
4280 #define TX_CPU_SCRATCH_BASE     0x34000
4281 #define TX_CPU_SCRATCH_SIZE     0x04000
4282
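/* Halt the internal RX or TX CPU by repeatedly asserting CPU_MODE_HALT and
 * polling CPU_MODE until the halt bit sticks.  5705 and later chips have no
 * separate TX CPU, so asking to halt it there is a driver bug.
 */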
4283 /* tp->lock is held. */
4284 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4285 {
4286         int i;
4287
4288         if (offset == TX_CPU_BASE &&
4289             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4290                 BUG();
4291
4292         if (offset == RX_CPU_BASE) {
4293                 for (i = 0; i < 10000; i++) {
4294                         tw32(offset + CPU_STATE, 0xffffffff);
4295                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4296                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4297                                 break;
4298                 }
4299
4300                 tw32(offset + CPU_STATE, 0xffffffff);
4301                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4302                 udelay(10);
4303         } else {
4304                 for (i = 0; i < 10000; i++) {
4305                         tw32(offset + CPU_STATE, 0xffffffff);
4306                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4307                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4308                                 break;
4309                 }
4310         }
4311
4312         if (i >= 10000) {
4313         printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4314                "%s CPU\n",
4315                        tp->dev->name,
4316                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4317                 return -ENODEV;
4318         }
4319         return 0;
4320 }
4321
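/* Describes one firmware image: the NIC-internal link address, length, and
 * host-side copy of each of the text, rodata and data sections.  A NULL
 * section pointer means "fill that region with zeros".
 */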
4322 struct fw_info {
4323         unsigned int text_base;
4324         unsigned int text_len;
4325         u32 *text_data;
4326         unsigned int rodata_base;
4327         unsigned int rodata_len;
4328         u32 *rodata_data;
4329         unsigned int data_base;
4330         unsigned int data_len;
4331         u32 *data_data;
4332 };
4333
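/* Copy a firmware image into a CPU's scratch memory: halt the CPU, zero the
 * whole scratch window, then write each section word by word at its link
 * offset.  5705+ chips use tg3_write_mem(); older chips use indirect
 * register writes, temporarily forced through PCI config space.
 */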
4334 /* tp->lock is held. */
4335 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4336                                  int cpu_scratch_size, struct fw_info *info)
4337 {
4338         int err, i;
4339         u32 orig_tg3_flags = tp->tg3_flags;
4340         void (*write_op)(struct tg3 *, u32, u32);
4341
4342         if (cpu_base == TX_CPU_BASE &&
4343             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4344                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4345                        "TX cpu firmware on %s which is 5705.\n",
4346                        tp->dev->name);
4347                 return -EINVAL;
4348         }
4349
4350         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4351                 write_op = tg3_write_mem;
4352         else
4353                 write_op = tg3_write_indirect_reg32;
4354
4355         /* Force use of PCI config space for indirect register
4356          * write calls.
4357          */
4358         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4359
4360         err = tg3_halt_cpu(tp, cpu_base);
4361         if (err)
4362                 goto out;
4363
4364         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4365                 write_op(tp, cpu_scratch_base + i, 0);
4366         tw32(cpu_base + CPU_STATE, 0xffffffff);
4367         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4368         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4369                 write_op(tp, (cpu_scratch_base +
4370                               (info->text_base & 0xffff) +
4371                               (i * sizeof(u32))),
4372                          (info->text_data ?
4373                           info->text_data[i] : 0));
4374         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4375                 write_op(tp, (cpu_scratch_base +
4376                               (info->rodata_base & 0xffff) +
4377                               (i * sizeof(u32))),
4378                          (info->rodata_data ?
4379                           info->rodata_data[i] : 0));
4380         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4381                 write_op(tp, (cpu_scratch_base +
4382                               (info->data_base & 0xffff) +
4383                               (i * sizeof(u32))),
4384                          (info->data_data ?
4385                           info->data_data[i] : 0));
4386
4387         err = 0;
4388
4389 out:
4390         tp->tg3_flags = orig_tg3_flags;
4391         return err;
4392 }
4393
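/* Load the 5701 A0 workaround firmware into both CPU scratch areas, then
 * start only the RX CPU: point its PC at the firmware entry, poll a few
 * times (re-halting in between) until the PC reads back correctly, and
 * finally release the CPU from halt.
 */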
4394 /* tp->lock is held. */
4395 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4396 {
4397         struct fw_info info;
4398         int err, i;
4399
4400         info.text_base = TG3_FW_TEXT_ADDR;
4401         info.text_len = TG3_FW_TEXT_LEN;
4402         info.text_data = &tg3FwText[0];
4403         info.rodata_base = TG3_FW_RODATA_ADDR;
4404         info.rodata_len = TG3_FW_RODATA_LEN;
4405         info.rodata_data = &tg3FwRodata[0];
4406         info.data_base = TG3_FW_DATA_ADDR;
4407         info.data_len = TG3_FW_DATA_LEN;
4408         info.data_data = NULL;
4409
4410         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4411                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4412                                     &info);
4413         if (err)
4414                 return err;
4415
4416         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4417                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4418                                     &info);
4419         if (err)
4420                 return err;
4421
4422         /* Now start up only the RX CPU. */
4423         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4424         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4425
4426         for (i = 0; i < 5; i++) {
4427                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4428                         break;
4429                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4430                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4431                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4432                 udelay(1000);
4433         }
4434         if (i >= 5) {
4435                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4436                        "to set RX CPU PC: is %08x, should be %08x\n",
4437                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4438                        TG3_FW_TEXT_ADDR);
4439                 return -ENODEV;
4440         }
4441         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4442         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4443
4444         return 0;
4445 }
4446
4447 #if TG3_TSO_SUPPORT != 0
4448
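/* Firmware image that implements TSO on the internal TX CPU for chips
 * without hardware TSO.  Layout constants and section contents follow the
 * same convention as the 5701 A0 fix image above.
 */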
4449 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4450 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4451 #define TG3_TSO_FW_RELEASE_FIX          0x0
4452 #define TG3_TSO_FW_START_ADDR           0x08000000
4453 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4454 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4455 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4456 #define TG3_TSO_FW_RODATA_LEN           0x60
4457 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4458 #define TG3_TSO_FW_DATA_LEN             0x30
4459 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4460 #define TG3_TSO_FW_SBSS_LEN             0x2c
4461 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4462 #define TG3_TSO_FW_BSS_LEN              0x894
4463
4464 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4465         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4466         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4467         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4468         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4469         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4470         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4471         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4472         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4473         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4474         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4475         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4476         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4477         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4478         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4479         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4480         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4481         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4482         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4483         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4484         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4485         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4486         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4487         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4488         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4489         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4490         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4491         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4492         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4493         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4494         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4495         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4496         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4497         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4498         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4499         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4500         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4501         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4502         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4503         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4504         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4505         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4506         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4507         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4508         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4509         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4510         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4511         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4512         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4513         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4514         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4515         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4516         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4517         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4518         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4519         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4520         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4521         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4522         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4523         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4524         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4525         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4526         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4527         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4528         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4529         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4530         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4531         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4532         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4533         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4534         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4535         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4536         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4537         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4538         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4539         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4540         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4541         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4542         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4543         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4544         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4545         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4546         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4547         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4548         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4549         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4550         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4551         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4552         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4553         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4554         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4555         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4556         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4557         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4558         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4559         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4560         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4561         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4562         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4563         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4564         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4565         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4566         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4567         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4568         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4569         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4570         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4571         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4572         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4573         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4574         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4575         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4576         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4577         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4578         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4579         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4580         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4581         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4582         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4583         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4584         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4585         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4586         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4587         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4588         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4589         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4590         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4591         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4592         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4593         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4594         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4595         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4596         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4597         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4598         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4599         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4600         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4601         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4602         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4603         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4604         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4605         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4606         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4607         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4608         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4609         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4610         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4611         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4612         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4613         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4614         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4615         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4616         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4617         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4618         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4619         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4620         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4621         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4622         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4623         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4624         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4625         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4626         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4627         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4628         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4629         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4630         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4631         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4632         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4633         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4634         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4635         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4636         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4637         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4638         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4639         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4640         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4641         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4642         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4643         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4644         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4645         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4646         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4647         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4648         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4649         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4650         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4651         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4652         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4653         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4654         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4655         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4656         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4657         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4658         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4659         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4660         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4661         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4662         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4663         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4664         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4665         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4666         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4667         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4668         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4669         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4670         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4671         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4672         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4673         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4674         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4675         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4676         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4677         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4678         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4679         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4680         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4681         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4682         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4683         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4684         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4685         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4686         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4687         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4688         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4689         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4690         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4691         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4692         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4693         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4694         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4695         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4696         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4697         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4698         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4699         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4700         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4701         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4702         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4703         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4704         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4705         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4706         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4707         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4708         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4709         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4710         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4711         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4712         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4713         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4714         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4715         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4716         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4717         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4718         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4719         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4720         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4721         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4722         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4723         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4724         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4725         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4726         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4727         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4728         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4729         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4730         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4731         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4732         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4733         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4734         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4735         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4736         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4737         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4738         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4739         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4740         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4741         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4742         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4743         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4744         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4745         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4746         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4747         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4748         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4749 };
4750
4751 static u32 tg3TsoFwRodata[] = {
4752         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4753         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4754         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4755         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4756         0x00000000,
4757 };
4758
4759 static u32 tg3TsoFwData[] = {
4760         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4761         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4762         0x00000000,
4763 };
4764
4765 /* 5705 needs a special version of the TSO firmware.  */
4766 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4767 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4768 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4769 #define TG3_TSO5_FW_START_ADDR          0x00010000
4770 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4771 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4772 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4773 #define TG3_TSO5_FW_RODATA_LEN          0x50
4774 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4775 #define TG3_TSO5_FW_DATA_LEN            0x20
4776 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4777 #define TG3_TSO5_FW_SBSS_LEN            0x28
4778 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4779 #define TG3_TSO5_FW_BSS_LEN             0x88
4780
4781 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4782         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4783         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4784         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4785         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4786         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4787         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4788         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4789         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4790         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4791         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4792         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4793         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4794         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4795         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4796         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4797         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4798         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4799         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4800         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4801         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4802         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4803         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4804         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4805         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4806         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4807         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4808         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4809         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4810         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4811         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4812         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4813         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4814         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4815         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4816         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4817         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4818         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4819         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4820         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4821         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4822         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4823         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4824         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4825         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4826         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4827         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4828         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4829         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4830         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4831         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4832         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4833         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4834         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4835         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4836         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4837         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4838         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4839         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4840         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4841         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4842         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4843         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4844         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4845         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4846         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4847         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4848         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4849         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4850         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4851         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4852         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4853         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4854         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4855         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4856         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4857         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4858         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4859         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4860         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4861         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4862         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4863         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4864         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4865         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4866         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4867         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4868         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4869         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4870         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4871         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4872         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4873         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4874         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4875         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4876         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4877         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4878         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4879         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4880         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4881         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4882         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4883         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4884         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4885         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4886         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4887         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4888         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4889         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4890         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4891         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4892         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4893         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4894         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4895         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4896         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4897         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4898         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4899         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4900         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4901         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4902         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4903         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4904         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4905         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4906         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4907         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4908         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4909         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4910         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4911         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4912         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4913         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4914         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4915         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4916         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4917         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4918         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4919         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4920         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4921         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4922         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4923         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4924         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4925         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4926         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4927         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4928         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4929         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4930         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4931         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4932         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4933         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4934         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4935         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4936         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4937         0x00000000, 0x00000000, 0x00000000,
4938 };
4939
4940 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4941         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4942         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4943         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4944         0x00000000, 0x00000000, 0x00000000,
4945 };
4946
4947 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4948         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4949         0x00000000, 0x00000000, 0x00000000,
4950 };
4951
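/* Load the TSO firmware when the chip lacks hardware TSO.  The 5705 variant
 * runs on the RX CPU and is staged in the SRAM MBUF pool; other chips load
 * the standard image into the TX CPU scratch area.  After loading, the
 * target CPU's PC is pointed at the firmware text base and verified before
 * the CPU is released from halt.
 */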
4952 /* tp->lock is held. */
4953 static int tg3_load_tso_firmware(struct tg3 *tp)
4954 {
4955         struct fw_info info;
4956         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4957         int err, i;
4958
4959         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4960                 return 0;
4961
4962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4963                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4964                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4965                 info.text_data = &tg3Tso5FwText[0];
4966                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4967                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4968                 info.rodata_data = &tg3Tso5FwRodata[0];
4969                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4970                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4971                 info.data_data = &tg3Tso5FwData[0];
4972                 cpu_base = RX_CPU_BASE;
4973                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4974                 cpu_scratch_size = (info.text_len +
4975                                     info.rodata_len +
4976                                     info.data_len +
4977                                     TG3_TSO5_FW_SBSS_LEN +
4978                                     TG3_TSO5_FW_BSS_LEN);
4979         } else {
4980                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4981                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4982                 info.text_data = &tg3TsoFwText[0];
4983                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4984                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4985                 info.rodata_data = &tg3TsoFwRodata[0];
4986                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4987                 info.data_len = TG3_TSO_FW_DATA_LEN;
4988                 info.data_data = &tg3TsoFwData[0];
4989                 cpu_base = TX_CPU_BASE;
4990                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4991                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4992         }
4993
4994         err = tg3_load_firmware_cpu(tp, cpu_base,
4995                                     cpu_scratch_base, cpu_scratch_size,
4996                                     &info);
4997         if (err)
4998                 return err;
4999
5000         /* Now start up the CPU. */
5001         tw32(cpu_base + CPU_STATE, 0xffffffff);
5002         tw32_f(cpu_base + CPU_PC,    info.text_base);
5003
5004         for (i = 0; i < 5; i++) {
5005                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5006                         break;
5007                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5008                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5009                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5010                 udelay(1000);
5011         }
5012         if (i >= 5) {
5013                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set CPU PC "
5014                        "for %s: PC is %08x, should be %08x\n",
5015                        tp->dev->name, tr32(cpu_base + CPU_PC),
5016                        info.text_base);
5017                 return -ENODEV;
5018         }
5019         tw32(cpu_base + CPU_STATE, 0xffffffff);
5020         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5021         return 0;
5022 }
5023
5024 #endif /* TG3_TSO_SUPPORT != 0 */
5025
5026 /* tp->lock is held. */
5027 static void __tg3_set_mac_addr(struct tg3 *tp)
5028 {
5029         u32 addr_high, addr_low;
5030         int i;
5031
5032         addr_high = ((tp->dev->dev_addr[0] << 8) |
5033                      tp->dev->dev_addr[1]);
5034         addr_low = ((tp->dev->dev_addr[2] << 24) |
5035                     (tp->dev->dev_addr[3] << 16) |
5036                     (tp->dev->dev_addr[4] <<  8) |
5037                     (tp->dev->dev_addr[5] <<  0));
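        /* The chip keeps four station address slots laid out back to back
         * starting at MAC_ADDR_0; program the same address into all of them.
         */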
5038         for (i = 0; i < 4; i++) {
5039                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5040                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5041         }
5042
5043         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5044             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5045                 for (i = 0; i < 12; i++) {
5046                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5047                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5048                 }
5049         }
5050
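        /* Seed the transmit backoff pseudo-random generator from the MAC
         * address so that stations sharing a segment pick different backoff
         * slots after a collision.
         */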
5051         addr_high = (tp->dev->dev_addr[0] +
5052                      tp->dev->dev_addr[1] +
5053                      tp->dev->dev_addr[2] +
5054                      tp->dev->dev_addr[3] +
5055                      tp->dev->dev_addr[4] +
5056                      tp->dev->dev_addr[5]) &
5057                 TX_BACKOFF_SEED_MASK;
5058         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5059 }
5060
5061 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5062 {
5063         struct tg3 *tp = netdev_priv(dev);
5064         struct sockaddr *addr = p;
5065
5066         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5067
5068         spin_lock_irq(&tp->lock);
5069         __tg3_set_mac_addr(tp);
5070         spin_unlock_irq(&tp->lock);
5071
5072         return 0;
5073 }
5074
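/* Write one TG3_BDINFO ring control block into NIC memory: the 64-bit host
 * DMA address of the ring, the maxlen/flags word and, on pre-5705 chips
 * only, the NIC-local address of the ring.
 */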
5075 /* tp->lock is held. */
5076 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5077                            dma_addr_t mapping, u32 maxlen_flags,
5078                            u32 nic_addr)
5079 {
5080         tg3_write_mem(tp,
5081                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5082                       ((u64) mapping >> 32));
5083         tg3_write_mem(tp,
5084                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5085                       ((u64) mapping & 0xffffffff));
5086         tg3_write_mem(tp,
5087                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5088                        maxlen_flags);
5089
5090         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5091                 tg3_write_mem(tp,
5092                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5093                               nic_addr);
5094 }
5095
5096 static void __tg3_set_rx_mode(struct net_device *);
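/* Program the host coalescing engine with the given ethtool coalescing
 * parameters.  The per-IRQ tick values and the statistics block interval
 * are only written on pre-5705 chips.
 */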
5097 static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5098 {
5099         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5100         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5101         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5102         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5103         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5104                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5105                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5106         }
5107         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5108         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5109         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5110                 u32 val = ec->stats_block_coalesce_usecs;
5111
5112                 if (!netif_carrier_ok(tp->dev))
5113                         val = 0;
5114
5115                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5116         }
5117 }
5118
5119 /* tp->lock is held. */
5120 static int tg3_reset_hw(struct tg3 *tp)
5121 {
5122         u32 val, rdmac_mode;
5123         int i, err, limit;
5124
5125         tg3_disable_ints(tp);
5126
5127         tg3_stop_fw(tp);
5128
5129         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5130
5131         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5132                 tg3_abort_hw(tp, 1);
5133         }
5134
5135         err = tg3_chip_reset(tp);
5136         if (err)
5137                 return err;
5138
5139         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5140
5141         /* This works around an issue with Athlon chipsets on
5142          * B3 tigon3 silicon.  This bit has no effect on any
5143          * other revision.  But do not set this on PCI Express
5144          * chips.
5145          */
5146         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5147                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5148         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5149
5150         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5151             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5152                 val = tr32(TG3PCI_PCISTATE);
5153                 val |= PCISTATE_RETRY_SAME_DMA;
5154                 tw32(TG3PCI_PCISTATE, val);
5155         }
5156
5157         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5158                 /* Enable some hw fixes.  */
5159                 val = tr32(TG3PCI_MSI_DATA);
5160                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5161                 tw32(TG3PCI_MSI_DATA, val);
5162         }
5163
5164         /* Descriptor ring init may make accesses to the
5165          * NIC SRAM area to setup the TX descriptors, so we
5166          * can only do this after the hardware has been
5167          * successfully reset.
5168          */
5169         tg3_init_rings(tp);
5170
5171         /* This value is determined during the probe time DMA
5172          * engine test, tg3_test_dma.
5173          */
5174         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5175
5176         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5177                           GRC_MODE_4X_NIC_SEND_RINGS |
5178                           GRC_MODE_NO_TX_PHDR_CSUM |
5179                           GRC_MODE_NO_RX_PHDR_CSUM);
5180         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5181         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5182                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5183         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5184                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5185
5186         tw32(GRC_MODE,
5187              tp->grc_mode |
5188              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5189
5190         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
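        /* A prescaler value of 65 divides the 66 MHz core clock down to the
         * 1 usec tick the timer expects.
         */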
5191         val = tr32(GRC_MISC_CFG);
5192         val &= ~0xff;
5193         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5194         tw32(GRC_MISC_CFG, val);
5195
5196         /* Initialize MBUF/DESC pool. */
5197         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5198                 /* Do nothing.  */
5199         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5200                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5201                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5202                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5203                 else
5204                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5205                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5206                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5207         }
5208 #if TG3_TSO_SUPPORT != 0
5209         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5210                 int fw_len;
5211
5212                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5213                           TG3_TSO5_FW_RODATA_LEN +
5214                           TG3_TSO5_FW_DATA_LEN +
5215                           TG3_TSO5_FW_SBSS_LEN +
5216                           TG3_TSO5_FW_BSS_LEN);
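                /* Round the firmware footprint up to a 128-byte boundary and
                 * carve it out of the front of the 5705 mbuf pool; the usable
                 * mbuf pool begins right after it.
                 */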
5217                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5218                 tw32(BUFMGR_MB_POOL_ADDR,
5219                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5220                 tw32(BUFMGR_MB_POOL_SIZE,
5221                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5222         }
5223 #endif
5224
5225         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5226                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5227                      tp->bufmgr_config.mbuf_read_dma_low_water);
5228                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5229                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5230                 tw32(BUFMGR_MB_HIGH_WATER,
5231                      tp->bufmgr_config.mbuf_high_water);
5232         } else {
5233                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5234                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5235                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5236                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5237                 tw32(BUFMGR_MB_HIGH_WATER,
5238                      tp->bufmgr_config.mbuf_high_water_jumbo);
5239         }
5240         tw32(BUFMGR_DMA_LOW_WATER,
5241              tp->bufmgr_config.dma_low_water);
5242         tw32(BUFMGR_DMA_HIGH_WATER,
5243              tp->bufmgr_config.dma_high_water);
5244
5245         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5246         for (i = 0; i < 2000; i++) {
5247                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5248                         break;
5249                 udelay(10);
5250         }
5251         if (i >= 2000) {
5252                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5253                        tp->dev->name);
5254                 return -ENODEV;
5255         }
5256
5257         /* Setup replenish threshold. */
5258         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5259
5260         /* Initialize TG3_BDINFO's at:
5261          *  RCVDBDI_STD_BD:     standard eth size rx ring
5262          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5263          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5264          *
5265          * like so:
5266          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5267          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5268          *                              ring attribute flags
5269          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5270          *
5271          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5272          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5273          *
5274          * The size of each ring is fixed in the firmware, but the location is
5275          * configurable.
5276          */
5277         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5278              ((u64) tp->rx_std_mapping >> 32));
5279         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5280              ((u64) tp->rx_std_mapping & 0xffffffff));
5281         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5282              NIC_SRAM_RX_BUFFER_DESC);
5283
5284         /* Don't even try to program the JUMBO/MINI buffer descriptor
5285          * configs on 5705.
5286          */
5287         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5288                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5289                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5290         } else {
5291                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5292                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5293
5294                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5295                      BDINFO_FLAGS_DISABLED);
5296
5297                 /* Setup replenish threshold. */
5298                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5299
5300                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5301                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5302                              ((u64) tp->rx_jumbo_mapping >> 32));
5303                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5304                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5305                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5306                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5307                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5308                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5309                 } else {
5310                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5311                              BDINFO_FLAGS_DISABLED);
5312                 }
5313
5314         }
5315
5316         /* There is only one send ring on 5705/5750, no need to explicitly
5317          * disable the others.
5318          */
5319         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5320                 /* Clear out send RCB ring in SRAM. */
5321                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5322                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5323                                       BDINFO_FLAGS_DISABLED);
5324         }
5325
5326         tp->tx_prod = 0;
5327         tp->tx_cons = 0;
5328         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5329         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5330
5331         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5332                        tp->tx_desc_mapping,
5333                        (TG3_TX_RING_SIZE <<
5334                         BDINFO_FLAGS_MAXLEN_SHIFT),
5335                        NIC_SRAM_TX_BUFFER_DESC);
5336
5337         /* There is only one receive return ring on 5705/5750, no need
5338          * to explicitly disable the others.
5339          */
5340         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5341                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5342                      i += TG3_BDINFO_SIZE) {
5343                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5344                                       BDINFO_FLAGS_DISABLED);
5345                 }
5346         }
5347
5348         tp->rx_rcb_ptr = 0;
5349         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5350
5351         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5352                        tp->rx_rcb_mapping,
5353                        (TG3_RX_RCB_RING_SIZE(tp) <<
5354                         BDINFO_FLAGS_MAXLEN_SHIFT),
5355                        0);
5356
5357         tp->rx_std_ptr = tp->rx_pending;
5358         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5359                      tp->rx_std_ptr);
5360
5361         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5362                                                 tp->rx_jumbo_pending : 0;
5363         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5364                      tp->rx_jumbo_ptr);
5365
5366         /* Initialize MAC address and backoff seed. */
5367         __tg3_set_mac_addr(tp);
5368
5369         /* MTU + ethernet header + 4-byte FCS + 4-byte optional VLAN tag */
5370         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5371
5372         /* The slot time is changed by tg3_setup_phy if we
5373          * run at gigabit with half duplex.
5374          */
5375         tw32(MAC_TX_LENGTHS,
5376              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5377              (6 << TX_LENGTHS_IPG_SHIFT) |
5378              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5379
5380         /* Receive rules. */
5381         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5382         tw32(RCVLPC_CONFIG, 0x0181);
5383
5384         /* Calculate the RDMAC_MODE setting early, as we need it to determine
5385          * the RCVLPC_STATS_ENABLE mask.
5386          */
5387         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5388                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5389                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5390                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5391                       RDMAC_MODE_LNGREAD_ENAB);
5392         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5393                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5394
5395         /* If statement applies to 5705 and 5750 PCI devices only */
5396         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5397              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5398             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5399                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5400                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5401                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5402                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5403                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5404                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5405                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5406                 }
5407         }
5408
5409         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5410                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5411
5412 #if TG3_TSO_SUPPORT != 0
5413         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5414                 rdmac_mode |= (1 << 27);
5415 #endif
5416
5417         /* Receive/send statistics. */
5418         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5419             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5420                 val = tr32(RCVLPC_STATS_ENABLE);
5421                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5422                 tw32(RCVLPC_STATS_ENABLE, val);
5423         } else {
5424                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5425         }
5426         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5427         tw32(SNDDATAI_STATSENAB, 0xffffff);
5428         tw32(SNDDATAI_STATSCTRL,
5429              (SNDDATAI_SCTRL_ENABLE |
5430               SNDDATAI_SCTRL_FASTUPD));
5431
5432         /* Setup host coalescing engine. */
5433         tw32(HOSTCC_MODE, 0);
5434         for (i = 0; i < 2000; i++) {
5435                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5436                         break;
5437                 udelay(10);
5438         }
5439
5440         tg3_set_coalesce(tp, &tp->coal);
5441
5442         /* set status block DMA address */
5443         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5444              ((u64) tp->status_mapping >> 32));
5445         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5446              ((u64) tp->status_mapping & 0xffffffff));
5447
5448         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5449                 /* Status/statistics block address.  See tg3_timer,
5450                  * the tg3_periodic_fetch_stats call there, and
5451                  * tg3_get_stats to see how this works for 5705/5750 chips.
5452                  */
5453                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5454                      ((u64) tp->stats_mapping >> 32));
5455                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5456                      ((u64) tp->stats_mapping & 0xffffffff));
5457                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5458                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5459         }
5460
5461         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5462
5463         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5464         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5465         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5466                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5467
5468         /* Clear statistics/status block in chip, and status block in ram. */
5469         for (i = NIC_SRAM_STATS_BLK;
5470              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5471              i += sizeof(u32)) {
5472                 tg3_write_mem(tp, i, 0);
5473                 udelay(40);
5474         }
5475         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5476
5477         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5478                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5479         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5480         udelay(40);
5481
5482         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5483          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5484          * register to preserve the GPIO settings for LOMs. The GPIOs,
5485          * whether used as inputs or outputs, are set by boot code after
5486          * reset.
5487          */
5488         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5489                 u32 gpio_mask;
5490
5491                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5492                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5493
5494                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5495                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5496                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5497
5498                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5499
5500                 /* GPIO1 must be driven high for eeprom write protect */
5501                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5502                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5503         }
5504         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5505         udelay(100);
5506
5507         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5508         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5509         tp->last_tag = 0;
5510
5511         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5512                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5513                 udelay(40);
5514         }
5515
5516         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5517                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5518                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5519                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5520                WDMAC_MODE_LNGREAD_ENAB);
5521
5522         /* If statement applies to 5705 and 5750 PCI devices only */
5523         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5524              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5526                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5527                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5528                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5529                         /* nothing */
5530                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5531                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5532                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5533                         val |= WDMAC_MODE_RX_ACCEL;
5534                 }
5535         }
5536
5537         tw32_f(WDMAC_MODE, val);
5538         udelay(40);
5539
5540         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5541                 val = tr32(TG3PCI_X_CAPS);
5542                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5543                         val &= ~PCIX_CAPS_BURST_MASK;
5544                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5545                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5546                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5547                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5548                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5549                                 val |= (tp->split_mode_max_reqs <<
5550                                         PCIX_CAPS_SPLIT_SHIFT);
5551                 }
5552                 tw32(TG3PCI_X_CAPS, val);
5553         }
5554
5555         tw32_f(RDMAC_MODE, rdmac_mode);
5556         udelay(40);
5557
5558         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5559         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5560                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5561         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5562         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5563         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5564         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5565         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5566 #if TG3_TSO_SUPPORT != 0
5567         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5568                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5569 #endif
5570         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5571         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5572
5573         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5574                 err = tg3_load_5701_a0_firmware_fix(tp);
5575                 if (err)
5576                         return err;
5577         }
5578
5579 #if TG3_TSO_SUPPORT != 0
5580         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5581                 err = tg3_load_tso_firmware(tp);
5582                 if (err)
5583                         return err;
5584         }
5585 #endif
5586
5587         tp->tx_mode = TX_MODE_ENABLE;
5588         tw32_f(MAC_TX_MODE, tp->tx_mode);
5589         udelay(100);
5590
5591         tp->rx_mode = RX_MODE_ENABLE;
5592         tw32_f(MAC_RX_MODE, tp->rx_mode);
5593         udelay(10);
5594
5595         if (tp->link_config.phy_is_low_power) {
5596                 tp->link_config.phy_is_low_power = 0;
5597                 tp->link_config.speed = tp->link_config.orig_speed;
5598                 tp->link_config.duplex = tp->link_config.orig_duplex;
5599                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5600         }
5601
5602         tp->mi_mode = MAC_MI_MODE_BASE;
5603         tw32_f(MAC_MI_MODE, tp->mi_mode);
5604         udelay(80);
5605
5606         tw32(MAC_LED_CTRL, tp->led_ctrl);
5607
5608         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5609         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5610                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5611                 udelay(10);
5612         }
5613         tw32_f(MAC_RX_MODE, tp->rx_mode);
5614         udelay(10);
5615
5616         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5617                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5618                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5619                         /* Set drive transmission level to 1.2V, but only
5620                          * if the signal pre-emphasis bit is not set.  */
5621                         val = tr32(MAC_SERDES_CFG);
5622                         val &= 0xfffff000;
5623                         val |= 0x880;
5624                         tw32(MAC_SERDES_CFG, val);
5625                 }
5626                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5627                         tw32(MAC_SERDES_CFG, 0x616000);
5628         }
5629
5630         /* Prevent chip from dropping frames when flow control
5631          * is enabled.
5632          */
5633         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5634
5635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5636             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5637                 /* Use hardware link auto-negotiation */
5638                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5639         }
5640
5641         err = tg3_setup_phy(tp, 1);
5642         if (err)
5643                 return err;
5644
5645         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5646                 u32 tmp;
5647
5648                 /* Clear CRC stats. */
5649                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5650                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5651                         tg3_readphy(tp, 0x14, &tmp);
5652                 }
5653         }
5654
5655         __tg3_set_rx_mode(tp->dev);
5656
5657         /* Initialize receive rules. */
5658         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5659         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5660         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5661         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5662
5663         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5664                 limit = 8;
5665         else
5666                 limit = 16;
5667         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5668                 limit -= 4;
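        /* The cases below deliberately fall through, clearing the unused
         * receive rules from (limit - 1) on down; rules 0 and 1 carry the
         * defaults programmed above.
         */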
5669         switch (limit) {
5670         case 16:
5671                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5672         case 15:
5673                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5674         case 14:
5675                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5676         case 13:
5677                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5678         case 12:
5679                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5680         case 11:
5681                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5682         case 10:
5683                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5684         case 9:
5685                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5686         case 8:
5687                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5688         case 7:
5689                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5690         case 6:
5691                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5692         case 5:
5693                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5694         case 4:
5695                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5696         case 3:
5697                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5698         case 2:
5699         case 1:
5700
5701         default:
5702                 break;
5703         }
5704
5705         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5706
5707         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5708                 tg3_enable_ints(tp);
5709
5710         return 0;
5711 }
5712
5713 /* Called at device open time to get the chip ready for
5714  * packet processing.  Invoked with tp->lock held.
5715  */
5716 static int tg3_init_hw(struct tg3 *tp)
5717 {
5718         int err;
5719
5720         /* Force the chip into D0. */
5721         err = tg3_set_power_state(tp, 0);
5722         if (err)
5723                 goto out;
5724
5725         tg3_switch_clocks(tp);
5726
5727         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5728
5729         err = tg3_reset_hw(tp);
5730
5731 out:
5732         return err;
5733 }
5734
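/* Add the 32-bit counter in REG into the 64-bit software counter PSTAT;
 * a wrap of the low word past the value just added carries into the
 * high word.
 */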
5735 #define TG3_STAT_ADD32(PSTAT, REG) \
5736 do {    u32 __val = tr32(REG); \
5737         (PSTAT)->low += __val; \
5738         if ((PSTAT)->low < __val) \
5739                 (PSTAT)->high += 1; \
5740 } while (0)
5741
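/* The 5705 and newer chips do not DMA the statistics block to host memory
 * (see tg3_reset_hw), so tg3_timer calls this once a second to accumulate
 * the MAC counters by hand.
 */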
5742 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5743 {
5744         struct tg3_hw_stats *sp = tp->hw_stats;
5745
5746         if (!netif_carrier_ok(tp->dev))
5747                 return;
5748
5749         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5750         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5751         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5752         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5753         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5754         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5755         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5756         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5757         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5758         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5759         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5760         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5761         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5762
5763         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5764         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5765         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5766         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5767         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5768         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5769         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5770         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5771         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5772         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5773         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5774         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5775         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5776         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5777 }
5778
5779 static void tg3_timer(unsigned long __opaque)
5780 {
5781         struct tg3 *tp = (struct tg3 *) __opaque;
5782         unsigned long flags;
5783
5784         spin_lock_irqsave(&tp->lock, flags);
5785         spin_lock(&tp->tx_lock);
5786
5787         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
5788                 /* All of this garbage is because, when using non-tagged
5789                  * IRQ status, the mailbox/status_block protocol the chip
5790                  * uses with the CPU is race prone.
5791                  */
5792                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5793                         tw32(GRC_LOCAL_CTRL,
5794                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5795                 } else {
5796                         tw32(HOSTCC_MODE, tp->coalesce_mode |
5797                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5798                 }
5799
5800                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5801                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5802                         spin_unlock(&tp->tx_lock);
5803                         spin_unlock_irqrestore(&tp->lock, flags);
5804                         schedule_work(&tp->reset_task);
5805                         return;
5806                 }
5807         }
5808
5809         /* This part only runs once per second. */
5810         if (!--tp->timer_counter) {
5811                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5812                         tg3_periodic_fetch_stats(tp);
5813
5814                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5815                         u32 mac_stat;
5816                         int phy_event;
5817
5818                         mac_stat = tr32(MAC_STATUS);
5819
5820                         phy_event = 0;
5821                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5822                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5823                                         phy_event = 1;
5824                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5825                                 phy_event = 1;
5826
5827                         if (phy_event)
5828                                 tg3_setup_phy(tp, 0);
5829                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5830                         u32 mac_stat = tr32(MAC_STATUS);
5831                         int need_setup = 0;
5832
5833                         if (netif_carrier_ok(tp->dev) &&
5834                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5835                                 need_setup = 1;
5836                         }
5837                         if (!netif_carrier_ok(tp->dev) &&
5838                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5839                                          MAC_STATUS_SIGNAL_DET))) {
5840                                 need_setup = 1;
5841                         }
5842                         if (need_setup) {
5843                                 tw32_f(MAC_MODE,
5844                                      (tp->mac_mode &
5845                                       ~MAC_MODE_PORT_MODE_MASK));
5846                                 udelay(40);
5847                                 tw32_f(MAC_MODE, tp->mac_mode);
5848                                 udelay(40);
5849                                 tg3_setup_phy(tp, 0);
5850                         }
5851                 }
5852
5853                 tp->timer_counter = tp->timer_multiplier;
5854         }
5855
5856         /* Heartbeat is only sent once every 120 seconds.  */
5857         if (!--tp->asf_counter) {
5858                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5859                         u32 val;
5860
5861                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5862                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5863                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5864                         val = tr32(GRC_RX_CPU_EVENT);
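                        /* Raising bit 14 signals the RX CPU firmware that a
                         * driver command is waiting in the mailbox.
                         */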
5865                         val |= (1 << 14);
5866                         tw32(GRC_RX_CPU_EVENT, val);
5867                 }
5868                 tp->asf_counter = tp->asf_multiplier;
5869         }
5870
5871         spin_unlock(&tp->tx_lock);
5872         spin_unlock_irqrestore(&tp->lock, flags);
5873
5874         tp->timer.expires = jiffies + tp->timer_offset;
5875         add_timer(&tp->timer);
5876 }
5877
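/* Verify that the chip can deliver an interrupt on the current IRQ line:
 * hook up a test ISR, force the host coalescing engine to fire immediately,
 * and poll interrupt mailbox 0 to see whether the interrupt was serviced.
 */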
5878 static int tg3_test_interrupt(struct tg3 *tp)
5879 {
5880         struct net_device *dev = tp->dev;
5881         int err, i;
5882         u32 int_mbox = 0;
5883
5884         tg3_disable_ints(tp);
5885
5886         free_irq(tp->pdev->irq, dev);
5887
5888         err = request_irq(tp->pdev->irq, tg3_test_isr,
5889                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5890         if (err)
5891                 return err;
5892
5893         tg3_enable_ints(tp);
5894
5895         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5896                HOSTCC_MODE_NOW);
5897
5898         for (i = 0; i < 5; i++) {
5899                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5900                 if (int_mbox != 0)
5901                         break;
5902                 msleep(10);
5903         }
5904
5905         tg3_disable_ints(tp);
5906
5907         free_irq(tp->pdev->irq, dev);
5908         
5909         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5910                 err = request_irq(tp->pdev->irq, tg3_msi,
5911                                   SA_SAMPLE_RANDOM, dev->name, dev);
5912         else {
5913                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
5914                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5915                         fn = tg3_interrupt_tagged;
5916                 err = request_irq(tp->pdev->irq, fn,
5917                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5918         }
5919
5920         if (err)
5921                 return err;
5922
5923         if (int_mbox != 0)
5924                 return 0;
5925
5926         return -EIO;
5927 }
5928
5929 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
5930  * INTx mode is successfully restored.
5931  */
5932 static int tg3_test_msi(struct tg3 *tp)
5933 {
5934         struct net_device *dev = tp->dev;
5935         int err;
5936         u16 pci_cmd;
5937
5938         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5939                 return 0;
5940
5941         /* Turn off SERR reporting in case MSI terminates with Master
5942          * Abort.
5943          */
5944         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5945         pci_write_config_word(tp->pdev, PCI_COMMAND,
5946                               pci_cmd & ~PCI_COMMAND_SERR);
5947
5948         err = tg3_test_interrupt(tp);
5949
5950         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5951
5952         if (!err)
5953                 return 0;
5954
5955         /* other failures */
5956         if (err != -EIO)
5957                 return err;
5958
5959         /* MSI test failed, go back to INTx mode */
5960         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
5961                "switching to INTx mode. Please report this failure to "
5962                "the PCI maintainer and include system chipset information.\n",
5963                tp->dev->name);
5964
5965         free_irq(tp->pdev->irq, dev);
5966         pci_disable_msi(tp->pdev);
5967
5968         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5969
5970         {
5971                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
5972                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5973                         fn = tg3_interrupt_tagged;
5974
5975                 err = request_irq(tp->pdev->irq, fn,
5976                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5977         }
5978         if (err)
5979                 return err;
5980
5981         /* Need to reset the chip because the MSI cycle may have terminated
5982          * with Master Abort.
5983          */
5984         spin_lock_irq(&tp->lock);
5985         spin_lock(&tp->tx_lock);
5986
5987         tg3_halt(tp, 1);
5988         err = tg3_init_hw(tp);
5989
5990         spin_unlock(&tp->tx_lock);
5991         spin_unlock_irq(&tp->lock);
5992
5993         if (err)
5994                 free_irq(tp->pdev->irq, dev);
5995
5996         return err;
5997 }
5998
5999 static int tg3_open(struct net_device *dev)
6000 {
6001         struct tg3 *tp = netdev_priv(dev);
6002         int err;
6003
6004         spin_lock_irq(&tp->lock);
6005         spin_lock(&tp->tx_lock);
6006
6007         tg3_disable_ints(tp);
6008         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6009
6010         spin_unlock(&tp->tx_lock);
6011         spin_unlock_irq(&tp->lock);
6012
6013         /* The placement of this call is tied
6014          * to the setup and use of Host TX descriptors.
6015          */
6016         err = tg3_alloc_consistent(tp);
6017         if (err)
6018                 return err;
6019
6020         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6021             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6022             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6023                 /* All MSI supporting chips should support tagged
6024                  * status.  Assert that this is the case.
6025                  */
6026                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6027                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6028                                "Not using MSI.\n", tp->dev->name);
6029                 } else if (pci_enable_msi(tp->pdev) == 0) {
6030                         u32 msi_mode;
6031
6032                         msi_mode = tr32(MSGINT_MODE);
6033                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6034                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6035                 }
6036         }
6037         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6038                 err = request_irq(tp->pdev->irq, tg3_msi,
6039                                   SA_SAMPLE_RANDOM, dev->name, dev);
6040         else {
6041                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6042                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6043                         fn = tg3_interrupt_tagged;
6044
6045                 err = request_irq(tp->pdev->irq, fn,
6046                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6047         }
6048
6049         if (err) {
6050                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6051                         pci_disable_msi(tp->pdev);
6052                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6053                 }
6054                 tg3_free_consistent(tp);
6055                 return err;
6056         }
6057
6058         spin_lock_irq(&tp->lock);
6059         spin_lock(&tp->tx_lock);
6060
6061         err = tg3_init_hw(tp);
6062         if (err) {
6063                 tg3_halt(tp, 1);
6064                 tg3_free_rings(tp);
6065         } else {
6066                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6067                         tp->timer_offset = HZ;
6068                 else
6069                         tp->timer_offset = HZ / 10;
6070
6071                 BUG_ON(tp->timer_offset > HZ);
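                /* timer_counter/timer_multiplier make the once-per-second
                 * work in tg3_timer independent of the tick rate chosen
                 * above; the ASF heartbeat then fires every 120 seconds.
                 */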
6072                 tp->timer_counter = tp->timer_multiplier =
6073                         (HZ / tp->timer_offset);
6074                 tp->asf_counter = tp->asf_multiplier =
6075                         ((HZ / tp->timer_offset) * 120);
6076
6077                 init_timer(&tp->timer);
6078                 tp->timer.expires = jiffies + tp->timer_offset;
6079                 tp->timer.data = (unsigned long) tp;
6080                 tp->timer.function = tg3_timer;
6081         }
6082
6083         spin_unlock(&tp->tx_lock);
6084         spin_unlock_irq(&tp->lock);
6085
6086         if (err) {
6087                 free_irq(tp->pdev->irq, dev);
6088                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6089                         pci_disable_msi(tp->pdev);
6090                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6091                 }
6092                 tg3_free_consistent(tp);
6093                 return err;
6094         }
6095
6096         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6097                 err = tg3_test_msi(tp);
6098
6099                 if (err) {
6100                         spin_lock_irq(&tp->lock);
6101                         spin_lock(&tp->tx_lock);
6102
6103                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6104                                 pci_disable_msi(tp->pdev);
6105                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6106                         }
6107                         tg3_halt(tp, 1);
6108                         tg3_free_rings(tp);
6109                         tg3_free_consistent(tp);
6110
6111                         spin_unlock(&tp->tx_lock);
6112                         spin_unlock_irq(&tp->lock);
6113
6114                         return err;
6115                 }
6116         }
6117
6118         spin_lock_irq(&tp->lock);
6119         spin_lock(&tp->tx_lock);
6120
6121         add_timer(&tp->timer);
6122         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6123         tg3_enable_ints(tp);
6124
6125         spin_unlock(&tp->tx_lock);
6126         spin_unlock_irq(&tp->lock);
6127
6128         netif_start_queue(dev);
6129
6130         return 0;
6131 }
6132
6133 #if 0
6134 /*static*/ void tg3_dump_state(struct tg3 *tp)
6135 {
6136         u32 val32, val32_2, val32_3, val32_4, val32_5;
6137         u16 val16;
6138         int i;
6139
6140         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6141         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6142         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6143                val16, val32);
6144
6145         /* MAC block */
6146         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6147                tr32(MAC_MODE), tr32(MAC_STATUS));
6148         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6149                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6150         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6151                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6152         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6153                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6154
6155         /* Send data initiator control block */
6156         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6157                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6158         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6159                tr32(SNDDATAI_STATSCTRL));
6160
6161         /* Send data completion control block */
6162         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6163
6164         /* Send BD ring selector block */
6165         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6166                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6167
6168         /* Send BD initiator control block */
6169         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6170                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6171
6172         /* Send BD completion control block */
6173         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6174
6175         /* Receive list placement control block */
6176         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6177                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6178         printk("       RCVLPC_STATSCTRL[%08x]\n",
6179                tr32(RCVLPC_STATSCTRL));
6180
6181         /* Receive data and receive BD initiator control block */
6182         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6183                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6184
6185         /* Receive data completion control block */
6186         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6187                tr32(RCVDCC_MODE));
6188
6189         /* Receive BD initiator control block */
6190         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6191                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6192
6193         /* Receive BD completion control block */
6194         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6195                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6196
6197         /* Receive list selector control block */
6198         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6199                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6200
6201         /* Mbuf cluster free block */
6202         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6203                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6204
6205         /* Host coalescing control block */
6206         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6207                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6208         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6209                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6210                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6211         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6212                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6213                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6214         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6215                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6216         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6217                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6218
6219         /* Memory arbiter control block */
6220         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6221                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6222
6223         /* Buffer manager control block */
6224         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6225                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6226         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6227                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6228         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6229                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6230                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6231                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6232
6233         /* Read DMA control block */
6234         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6235                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6236
6237         /* Write DMA control block */
6238         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6239                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6240
6241         /* DMA completion block */
6242         printk("DEBUG: DMAC_MODE[%08x]\n",
6243                tr32(DMAC_MODE));
6244
6245         /* GRC block */
6246         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6247                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6248         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6249                tr32(GRC_LOCAL_CTRL));
6250
6251         /* TG3_BDINFOs */
6252         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6253                tr32(RCVDBDI_JUMBO_BD + 0x0),
6254                tr32(RCVDBDI_JUMBO_BD + 0x4),
6255                tr32(RCVDBDI_JUMBO_BD + 0x8),
6256                tr32(RCVDBDI_JUMBO_BD + 0xc));
6257         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6258                tr32(RCVDBDI_STD_BD + 0x0),
6259                tr32(RCVDBDI_STD_BD + 0x4),
6260                tr32(RCVDBDI_STD_BD + 0x8),
6261                tr32(RCVDBDI_STD_BD + 0xc));
6262         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6263                tr32(RCVDBDI_MINI_BD + 0x0),
6264                tr32(RCVDBDI_MINI_BD + 0x4),
6265                tr32(RCVDBDI_MINI_BD + 0x8),
6266                tr32(RCVDBDI_MINI_BD + 0xc));
6267
6268         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6269         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6270         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6271         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6272         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6273                val32, val32_2, val32_3, val32_4);
6274
6275         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6276         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6277         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6278         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6279         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6280                val32, val32_2, val32_3, val32_4);
6281
6282         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6283         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6284         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6285         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6286         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6287         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6288                val32, val32_2, val32_3, val32_4, val32_5);
6289
6290         /* SW status block */
6291         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6292                tp->hw_status->status,
6293                tp->hw_status->status_tag,
6294                tp->hw_status->rx_jumbo_consumer,
6295                tp->hw_status->rx_consumer,
6296                tp->hw_status->rx_mini_consumer,
6297                tp->hw_status->idx[0].rx_producer,
6298                tp->hw_status->idx[0].tx_consumer);
6299
6300         /* SW statistics block */
6301         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6302                ((u32 *)tp->hw_stats)[0],
6303                ((u32 *)tp->hw_stats)[1],
6304                ((u32 *)tp->hw_stats)[2],
6305                ((u32 *)tp->hw_stats)[3]);
6306
6307         /* Mailboxes */
6308         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6309                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6310                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6311                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6312                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6313
6314         /* NIC side send descriptors. */
6315         for (i = 0; i < 6; i++) {
6316                 unsigned long txd;
6317
6318                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6319                         + (i * sizeof(struct tg3_tx_buffer_desc));
6320                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6321                        i,
6322                        readl(txd + 0x0), readl(txd + 0x4),
6323                        readl(txd + 0x8), readl(txd + 0xc));
6324         }
6325
6326         /* NIC side RX descriptors. */
6327         for (i = 0; i < 6; i++) {
6328                 unsigned long rxd;
6329
6330                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6331                         + (i * sizeof(struct tg3_rx_buffer_desc));
6332                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6333                        i,
6334                        readl(rxd + 0x0), readl(rxd + 0x4),
6335                        readl(rxd + 0x8), readl(rxd + 0xc));
6336                 rxd += (4 * sizeof(u32));
6337                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6338                        i,
6339                        readl(rxd + 0x0), readl(rxd + 0x4),
6340                        readl(rxd + 0x8), readl(rxd + 0xc));
6341         }
6342
6343         for (i = 0; i < 6; i++) {
6344                 unsigned long rxd;
6345
6346                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6347                         + (i * sizeof(struct tg3_rx_buffer_desc));
6348                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6349                        i,
6350                        readl(rxd + 0x0), readl(rxd + 0x4),
6351                        readl(rxd + 0x8), readl(rxd + 0xc));
6352                 rxd += (4 * sizeof(u32));
6353                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6354                        i,
6355                        readl(rxd + 0x0), readl(rxd + 0x4),
6356                        readl(rxd + 0x8), readl(rxd + 0xc));
6357         }
6358 }
6359 #endif
6360
6361 static struct net_device_stats *tg3_get_stats(struct net_device *);
6362 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6363
6364 static int tg3_close(struct net_device *dev)
6365 {
6366         struct tg3 *tp = netdev_priv(dev);
6367
6368         netif_stop_queue(dev);
6369
6370         del_timer_sync(&tp->timer);
6371
6372         spin_lock_irq(&tp->lock);
6373         spin_lock(&tp->tx_lock);
6374 #if 0
6375         tg3_dump_state(tp);
6376 #endif
6377
6378         tg3_disable_ints(tp);
6379
6380         tg3_halt(tp, 1);
6381         tg3_free_rings(tp);
6382         tp->tg3_flags &=
6383                 ~(TG3_FLAG_INIT_COMPLETE |
6384                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6385         netif_carrier_off(tp->dev);
6386
6387         spin_unlock(&tp->tx_lock);
6388         spin_unlock_irq(&tp->lock);
6389
6390         free_irq(tp->pdev->irq, dev);
6391         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6392                 pci_disable_msi(tp->pdev);
6393                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6394         }
6395
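        /* Snapshot the counters before tg3_free_consistent() releases the
         * hardware statistics block, so later stats reads can add them
         * back in.
         */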
6396         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6397                sizeof(tp->net_stats_prev));
6398         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6399                sizeof(tp->estats_prev));
6400
6401         tg3_free_consistent(tp);
6402
6403         return 0;
6404 }
6405
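/* Fold a 64-bit hardware counter into an unsigned long.  On 32-bit
 * hosts only the low 32 bits are returned, since unsigned long cannot
 * hold the full value.
 */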
6406 static inline unsigned long get_stat64(tg3_stat64_t *val)
6407 {
6408         unsigned long ret;
6409
6410 #if (BITS_PER_LONG == 32)
6411         ret = val->low;
6412 #else
6413         ret = ((u64)val->high << 32) | ((u64)val->low);
6414 #endif
6415         return ret;
6416 }
6417
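/* Return the accumulated RX CRC error count.  On 5700/5701 with a
 * copper PHY the count is read out of the PHY (registers 0x1e/0x14)
 * and accumulated in software; all other chips report it through the
 * MAC statistics block (rx_fcs_errors).
 */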
6418 static unsigned long calc_crc_errors(struct tg3 *tp)
6419 {
6420         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6421
6422         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6423             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6424              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6425                 unsigned long flags;
6426                 u32 val;
6427
6428                 spin_lock_irqsave(&tp->lock, flags);
6429                 if (!tg3_readphy(tp, 0x1e, &val)) {
6430                         tg3_writephy(tp, 0x1e, val | 0x8000);
6431                         tg3_readphy(tp, 0x14, &val);
6432                 } else
6433                         val = 0;
6434                 spin_unlock_irqrestore(&tp->lock, flags);
6435
6436                 tp->phy_crc_errors += val;
6437
6438                 return tp->phy_crc_errors;
6439         }
6440
6441         return get_stat64(&hw_stats->rx_fcs_errors);
6442 }
6443
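/* Accumulate one hardware statistic into the running ethtool total.
 * old_estats holds the totals saved at the last close (see tg3_close),
 * so counts carry across the hardware counters being reset.
 */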
6444 #define ESTAT_ADD(member) \
6445         estats->member =        old_estats->member + \
6446                                 get_stat64(&hw_stats->member)
6447
6448 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6449 {
6450         struct tg3_ethtool_stats *estats = &tp->estats;
6451         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6452         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6453
6454         if (!hw_stats)
6455                 return old_estats;
6456
6457         ESTAT_ADD(rx_octets);
6458         ESTAT_ADD(rx_fragments);
6459         ESTAT_ADD(rx_ucast_packets);
6460         ESTAT_ADD(rx_mcast_packets);
6461         ESTAT_ADD(rx_bcast_packets);
6462         ESTAT_ADD(rx_fcs_errors);
6463         ESTAT_ADD(rx_align_errors);
6464         ESTAT_ADD(rx_xon_pause_rcvd);
6465         ESTAT_ADD(rx_xoff_pause_rcvd);
6466         ESTAT_ADD(rx_mac_ctrl_rcvd);
6467         ESTAT_ADD(rx_xoff_entered);
6468         ESTAT_ADD(rx_frame_too_long_errors);
6469         ESTAT_ADD(rx_jabbers);
6470         ESTAT_ADD(rx_undersize_packets);
6471         ESTAT_ADD(rx_in_length_errors);
6472         ESTAT_ADD(rx_out_length_errors);
6473         ESTAT_ADD(rx_64_or_less_octet_packets);
6474         ESTAT_ADD(rx_65_to_127_octet_packets);
6475         ESTAT_ADD(rx_128_to_255_octet_packets);
6476         ESTAT_ADD(rx_256_to_511_octet_packets);
6477         ESTAT_ADD(rx_512_to_1023_octet_packets);
6478         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6479         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6480         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6481         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6482         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6483
6484         ESTAT_ADD(tx_octets);
6485         ESTAT_ADD(tx_collisions);
6486         ESTAT_ADD(tx_xon_sent);
6487         ESTAT_ADD(tx_xoff_sent);
6488         ESTAT_ADD(tx_flow_control);
6489         ESTAT_ADD(tx_mac_errors);
6490         ESTAT_ADD(tx_single_collisions);
6491         ESTAT_ADD(tx_mult_collisions);
6492         ESTAT_ADD(tx_deferred);
6493         ESTAT_ADD(tx_excessive_collisions);
6494         ESTAT_ADD(tx_late_collisions);
6495         ESTAT_ADD(tx_collide_2times);
6496         ESTAT_ADD(tx_collide_3times);
6497         ESTAT_ADD(tx_collide_4times);
6498         ESTAT_ADD(tx_collide_5times);
6499         ESTAT_ADD(tx_collide_6times);
6500         ESTAT_ADD(tx_collide_7times);
6501         ESTAT_ADD(tx_collide_8times);
6502         ESTAT_ADD(tx_collide_9times);
6503         ESTAT_ADD(tx_collide_10times);
6504         ESTAT_ADD(tx_collide_11times);
6505         ESTAT_ADD(tx_collide_12times);
6506         ESTAT_ADD(tx_collide_13times);
6507         ESTAT_ADD(tx_collide_14times);
6508         ESTAT_ADD(tx_collide_15times);
6509         ESTAT_ADD(tx_ucast_packets);
6510         ESTAT_ADD(tx_mcast_packets);
6511         ESTAT_ADD(tx_bcast_packets);
6512         ESTAT_ADD(tx_carrier_sense_errors);
6513         ESTAT_ADD(tx_discards);
6514         ESTAT_ADD(tx_errors);
6515
6516         ESTAT_ADD(dma_writeq_full);
6517         ESTAT_ADD(dma_write_prioq_full);
6518         ESTAT_ADD(rxbds_empty);
6519         ESTAT_ADD(rx_discards);
6520         ESTAT_ADD(rx_errors);
6521         ESTAT_ADD(rx_threshold_hit);
6522
6523         ESTAT_ADD(dma_readq_full);
6524         ESTAT_ADD(dma_read_prioq_full);
6525         ESTAT_ADD(tx_comp_queue_full);
6526
6527         ESTAT_ADD(ring_set_send_prod_index);
6528         ESTAT_ADD(ring_status_update);
6529         ESTAT_ADD(nic_irqs);
6530         ESTAT_ADD(nic_avoided_irqs);
6531         ESTAT_ADD(nic_tx_threshold_hit);
6532
6533         return estats;
6534 }
6535
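/* Fill in the standard net_device_stats from the hardware statistics
 * block, adding in net_stats_prev so the totals are preserved across
 * close/open cycles.
 */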
6536 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6537 {
6538         struct tg3 *tp = netdev_priv(dev);
6539         struct net_device_stats *stats = &tp->net_stats;
6540         struct net_device_stats *old_stats = &tp->net_stats_prev;
6541         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6542
6543         if (!hw_stats)
6544                 return old_stats;
6545
6546         stats->rx_packets = old_stats->rx_packets +
6547                 get_stat64(&hw_stats->rx_ucast_packets) +
6548                 get_stat64(&hw_stats->rx_mcast_packets) +
6549                 get_stat64(&hw_stats->rx_bcast_packets);
6550                 
6551         stats->tx_packets = old_stats->tx_packets +
6552                 get_stat64(&hw_stats->tx_ucast_packets) +
6553                 get_stat64(&hw_stats->tx_mcast_packets) +
6554                 get_stat64(&hw_stats->tx_bcast_packets);
6555
6556         stats->rx_bytes = old_stats->rx_bytes +
6557                 get_stat64(&hw_stats->rx_octets);
6558         stats->tx_bytes = old_stats->tx_bytes +
6559                 get_stat64(&hw_stats->tx_octets);
6560
6561         stats->rx_errors = old_stats->rx_errors +
6562                 get_stat64(&hw_stats->rx_errors) +
6563                 get_stat64(&hw_stats->rx_discards);
6564         stats->tx_errors = old_stats->tx_errors +
6565                 get_stat64(&hw_stats->tx_errors) +
6566                 get_stat64(&hw_stats->tx_mac_errors) +
6567                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6568                 get_stat64(&hw_stats->tx_discards);
6569
6570         stats->multicast = old_stats->multicast +
6571                 get_stat64(&hw_stats->rx_mcast_packets);
6572         stats->collisions = old_stats->collisions +
6573                 get_stat64(&hw_stats->tx_collisions);
6574
6575         stats->rx_length_errors = old_stats->rx_length_errors +
6576                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6577                 get_stat64(&hw_stats->rx_undersize_packets);
6578
6579         stats->rx_over_errors = old_stats->rx_over_errors +
6580                 get_stat64(&hw_stats->rxbds_empty);
6581         stats->rx_frame_errors = old_stats->rx_frame_errors +
6582                 get_stat64(&hw_stats->rx_align_errors);
6583         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6584                 get_stat64(&hw_stats->tx_discards);
6585         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6586                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6587
6588         stats->rx_crc_errors = old_stats->rx_crc_errors +
6589                 calc_crc_errors(tp);
6590
6591         return stats;
6592 }
6593
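/* Bit-reflected CRC-32 (polynomial 0xedb88320), the same algorithm as
 * the Ethernet FCS; the result is used to index the multicast hash
 * filter in __tg3_set_rx_mode() below.
 */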
6594 static inline u32 calc_crc(unsigned char *buf, int len)
6595 {
6596         u32 reg;
6597         u32 tmp;
6598         int j, k;
6599
6600         reg = 0xffffffff;
6601
6602         for (j = 0; j < len; j++) {
6603                 reg ^= buf[j];
6604
6605                 for (k = 0; k < 8; k++) {
6606                         tmp = reg & 0x01;
6607
6608                         reg >>= 1;
6609
6610                         if (tmp) {
6611                                 reg ^= 0xedb88320;
6612                         }
6613                 }
6614         }
6615
6616         return ~reg;
6617 }
6618
6619 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6620 {
6621         /* accept or reject all multicast frames */
6622         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6623         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6624         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6625         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6626 }
6627
6628 static void __tg3_set_rx_mode(struct net_device *dev)
6629 {
6630         struct tg3 *tp = netdev_priv(dev);
6631         u32 rx_mode;
6632
6633         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6634                                   RX_MODE_KEEP_VLAN_TAG);
6635
6636         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6637          * flag clear.
6638          */
6639 #if TG3_VLAN_TAG_USED
6640         if (!tp->vlgrp &&
6641             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6642                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6643 #else
6644         /* By definition, VLAN is always disabled in this
6645          * case.
6646          */
6647         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6648                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6649 #endif
6650
6651         if (dev->flags & IFF_PROMISC) {
6652                 /* Promiscuous mode. */
6653                 rx_mode |= RX_MODE_PROMISC;
6654         } else if (dev->flags & IFF_ALLMULTI) {
6655                 /* Accept all multicast. */
6656                 tg3_set_multi(tp, 1);
6657         } else if (dev->mc_count < 1) {
6658                 /* Reject all multicast. */
6659                 tg3_set_multi(tp, 0);
6660         } else {
6661                 /* Accept one or more multicast(s). */
6662                 struct dev_mc_list *mclist;
6663                 unsigned int i;
6664                 u32 mc_filter[4] = { 0, };
6665                 u32 regidx;
6666                 u32 bit;
6667                 u32 crc;
6668
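                /* Hash each address: the low 7 bits of the inverted CRC
                 * select one of 128 filter bits, spread across the four
                 * 32-bit MAC_HASH registers.
                 */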
6669                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6670                      i++, mclist = mclist->next) {
6671
6672                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6673                         bit = ~crc & 0x7f;
6674                         regidx = (bit & 0x60) >> 5;
6675                         bit &= 0x1f;
6676                         mc_filter[regidx] |= (1 << bit);
6677                 }
6678
6679                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6680                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6681                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6682                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6683         }
6684
6685         if (rx_mode != tp->rx_mode) {
6686                 tp->rx_mode = rx_mode;
6687                 tw32_f(MAC_RX_MODE, rx_mode);
6688                 udelay(10);
6689         }
6690 }
6691
6692 static void tg3_set_rx_mode(struct net_device *dev)
6693 {
6694         struct tg3 *tp = netdev_priv(dev);
6695
6696         spin_lock_irq(&tp->lock);
6697         spin_lock(&tp->tx_lock);
6698         __tg3_set_rx_mode(dev);
6699         spin_unlock(&tp->tx_lock);
6700         spin_unlock_irq(&tp->lock);
6701 }
6702
6703 #define TG3_REGDUMP_LEN         (32 * 1024)
6704
6705 static int tg3_get_regs_len(struct net_device *dev)
6706 {
6707         return TG3_REGDUMP_LEN;
6708 }
6709
6710 static void tg3_get_regs(struct net_device *dev,
6711                 struct ethtool_regs *regs, void *_p)
6712 {
6713         u32 *p = _p;
6714         struct tg3 *tp = netdev_priv(dev);
6715         u8 *orig_p = _p;
6716         int i;
6717
6718         regs->version = 0;
6719
6720         memset(p, 0, TG3_REGDUMP_LEN);
6721
6722         spin_lock_irq(&tp->lock);
6723         spin_lock(&tp->tx_lock);
6724
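/* Each register lands at its own hardware offset within the dump
 * buffer: GET_REG32_LOOP repositions the output pointer at 'base' and
 * then copies 'len' bytes worth of registers.
 */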
6725 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6726 #define GET_REG32_LOOP(base,len)                \
6727 do {    p = (u32 *)(orig_p + (base));           \
6728         for (i = 0; i < len; i += 4)            \
6729                 __GET_REG32((base) + i);        \
6730 } while (0)
6731 #define GET_REG32_1(reg)                        \
6732 do {    p = (u32 *)(orig_p + (reg));            \
6733         __GET_REG32((reg));                     \
6734 } while (0)
6735
6736         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6737         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6738         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6739         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6740         GET_REG32_1(SNDDATAC_MODE);
6741         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6742         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6743         GET_REG32_1(SNDBDC_MODE);
6744         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6745         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6746         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6747         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6748         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6749         GET_REG32_1(RCVDCC_MODE);
6750         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6751         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6752         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6753         GET_REG32_1(MBFREE_MODE);
6754         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6755         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6756         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6757         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6758         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6759         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6760         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6761         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6762         GET_REG32_LOOP(FTQ_RESET, 0x120);
6763         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6764         GET_REG32_1(DMAC_MODE);
6765         GET_REG32_LOOP(GRC_MODE, 0x4c);
6766         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6767                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6768
6769 #undef __GET_REG32
6770 #undef GET_REG32_LOOP
6771 #undef GET_REG32_1
6772
6773         spin_unlock(&tp->tx_lock);
6774         spin_unlock_irq(&tp->lock);
6775 }
6776
6777 static int tg3_get_eeprom_len(struct net_device *dev)
6778 {
6779         struct tg3 *tp = netdev_priv(dev);
6780
6781         return tp->nvram_size;
6782 }
6783
6784 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6785
6786 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6787 {
6788         struct tg3 *tp = netdev_priv(dev);
6789         int ret;
6790         u8  *pd;
6791         u32 i, offset, len, val, b_offset, b_count;
6792
6793         offset = eeprom->offset;
6794         len = eeprom->len;
6795         eeprom->len = 0;
6796
6797         eeprom->magic = TG3_EEPROM_MAGIC;
6798
6799         if (offset & 3) {
6800                 /* adjustments to start on required 4 byte boundary */
6801                 b_offset = offset & 3;
6802                 b_count = 4 - b_offset;
6803                 if (b_count > len) {
6804                         /* i.e. offset=1 len=2 */
6805                         b_count = len;
6806                 }
6807                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6808                 if (ret)
6809                         return ret;
6810                 val = cpu_to_le32(val);
6811                 memcpy(data, ((char*)&val) + b_offset, b_count);
6812                 len -= b_count;
6813                 offset += b_count;
6814                 eeprom->len += b_count;
6815         }
6816
6817         /* read bytes up to the last 4 byte boundary */
6818         pd = &data[eeprom->len];
6819         for (i = 0; i < (len - (len & 3)); i += 4) {
6820                 ret = tg3_nvram_read(tp, offset + i, &val);
6821                 if (ret) {
6822                         eeprom->len += i;
6823                         return ret;
6824                 }
6825                 val = cpu_to_le32(val);
6826                 memcpy(pd + i, &val, 4);
6827         }
6828         eeprom->len += i;
6829
6830         if (len & 3) {
6831                 /* read last bytes not ending on 4 byte boundary */
6832                 pd = &data[eeprom->len];
6833                 b_count = len & 3;
6834                 b_offset = offset + len - b_count;
6835                 ret = tg3_nvram_read(tp, b_offset, &val);
6836                 if (ret)
6837                         return ret;
6838                 val = cpu_to_le32(val);
6839                 memcpy(pd, ((char*)&val), b_count);
6840                 eeprom->len += b_count;
6841         }
6842         return 0;
6843 }
6844
6845 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6846
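/* ethtool EEPROM write.  An unaligned head or tail is handled by
 * reading back the neighbouring NVRAM words and merging the user data
 * into a word-aligned bounce buffer before writing.
 */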
6847 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6848 {
6849         struct tg3 *tp = netdev_priv(dev);
6850         int ret;
6851         u32 offset, len, b_offset, odd_len, start, end;
6852         u8 *buf;
6853
6854         if (eeprom->magic != TG3_EEPROM_MAGIC)
6855                 return -EINVAL;
6856
6857         offset = eeprom->offset;
6858         len = eeprom->len;
6859
6860         if ((b_offset = (offset & 3))) {
6861                 /* adjustments to start on required 4 byte boundary */
6862                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6863                 if (ret)
6864                         return ret;
6865                 start = cpu_to_le32(start);
6866                 len += b_offset;
6867                 offset &= ~3;
6868                 if (len < 4)
6869                         len = 4;
6870         }
6871
6872         odd_len = 0;
6873         if (len & 3) {
6874                 /* adjustments to end on required 4 byte boundary */
6875                 odd_len = 1;
6876                 len = (len + 3) & ~3;
6877                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6878                 if (ret)
6879                         return ret;
6880                 end = cpu_to_le32(end);
6881         }
6882
6883         buf = data;
6884         if (b_offset || odd_len) {
6885                 buf = kmalloc(len, GFP_KERNEL);
6886                 if (!buf)
6887                         return -ENOMEM;
6888                 if (b_offset)
6889                         memcpy(buf, &start, 4);
6890                 if (odd_len)
6891                         memcpy(buf+len-4, &end, 4);
6892                 memcpy(buf + b_offset, data, eeprom->len);
6893         }
6894
6895         ret = tg3_nvram_write_block(tp, offset, len, buf);
6896
6897         if (buf != data)
6898                 kfree(buf);
6899
6900         return ret;
6901 }
6902
6903 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6904 {
6905         struct tg3 *tp = netdev_priv(dev);
6906   
6907         cmd->supported = (SUPPORTED_Autoneg);
6908
6909         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6910                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6911                                    SUPPORTED_1000baseT_Full);
6912
6913         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6914                 cmd->supported |= (SUPPORTED_100baseT_Half |
6915                                   SUPPORTED_100baseT_Full |
6916                                   SUPPORTED_10baseT_Half |
6917                                   SUPPORTED_10baseT_Full |
6918                                   SUPPORTED_MII);
6919         else
6920                 cmd->supported |= SUPPORTED_FIBRE;
6921   
6922         cmd->advertising = tp->link_config.advertising;
6923         if (netif_running(dev)) {
6924                 cmd->speed = tp->link_config.active_speed;
6925                 cmd->duplex = tp->link_config.active_duplex;
6926         }
6927         cmd->port = 0;
6928         cmd->phy_address = PHY_ADDR;
6929         cmd->transceiver = 0;
6930         cmd->autoneg = tp->link_config.autoneg;
6931         cmd->maxtxpkt = 0;
6932         cmd->maxrxpkt = 0;
6933         return 0;
6934 }
6935   
6936 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6937 {
6938         struct tg3 *tp = netdev_priv(dev);
6939   
6940         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6941                 /* These are the only valid advertisement bits allowed.  */
6942                 if (cmd->autoneg == AUTONEG_ENABLE &&
6943                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6944                                           ADVERTISED_1000baseT_Full |
6945                                           ADVERTISED_Autoneg |
6946                                           ADVERTISED_FIBRE)))
6947                         return -EINVAL;
6948         }
6949
6950         spin_lock_irq(&tp->lock);
6951         spin_lock(&tp->tx_lock);
6952
6953         tp->link_config.autoneg = cmd->autoneg;
6954         if (cmd->autoneg == AUTONEG_ENABLE) {
6955                 tp->link_config.advertising = cmd->advertising;
6956                 tp->link_config.speed = SPEED_INVALID;
6957                 tp->link_config.duplex = DUPLEX_INVALID;
6958         } else {
6959                 tp->link_config.advertising = 0;
6960                 tp->link_config.speed = cmd->speed;
6961                 tp->link_config.duplex = cmd->duplex;
6962         }
6963   
6964         if (netif_running(dev))
6965                 tg3_setup_phy(tp, 1);
6966
6967         spin_unlock(&tp->tx_lock);
6968         spin_unlock_irq(&tp->lock);
6969   
6970         return 0;
6971 }
6972   
6973 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6974 {
6975         struct tg3 *tp = netdev_priv(dev);
6976   
6977         strcpy(info->driver, DRV_MODULE_NAME);
6978         strcpy(info->version, DRV_MODULE_VERSION);
6979         strcpy(info->bus_info, pci_name(tp->pdev));
6980 }
6981   
6982 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6983 {
6984         struct tg3 *tp = netdev_priv(dev);
6985   
6986         wol->supported = WAKE_MAGIC;
6987         wol->wolopts = 0;
6988         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6989                 wol->wolopts = WAKE_MAGIC;
6990         memset(&wol->sopass, 0, sizeof(wol->sopass));
6991 }
6992   
6993 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6994 {
6995         struct tg3 *tp = netdev_priv(dev);
6996   
6997         if (wol->wolopts & ~WAKE_MAGIC)
6998                 return -EINVAL;
6999         if ((wol->wolopts & WAKE_MAGIC) &&
7000             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7001             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7002                 return -EINVAL;
7003   
7004         spin_lock_irq(&tp->lock);
7005         if (wol->wolopts & WAKE_MAGIC)
7006                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7007         else
7008                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7009         spin_unlock_irq(&tp->lock);
7010   
7011         return 0;
7012 }
7013   
7014 static u32 tg3_get_msglevel(struct net_device *dev)
7015 {
7016         struct tg3 *tp = netdev_priv(dev);
7017         return tp->msg_enable;
7018 }
7019   
7020 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7021 {
7022         struct tg3 *tp = netdev_priv(dev);
7023         tp->msg_enable = value;
7024 }
7025   
7026 #if TG3_TSO_SUPPORT != 0
7027 static int tg3_set_tso(struct net_device *dev, u32 value)
7028 {
7029         struct tg3 *tp = netdev_priv(dev);
7030
7031         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7032                 if (value)
7033                         return -EINVAL;
7034                 return 0;
7035         }
7036         return ethtool_op_set_tso(dev, value);
7037 }
7038 #endif
7039   
7040 static int tg3_nway_reset(struct net_device *dev)
7041 {
7042         struct tg3 *tp = netdev_priv(dev);
7043         u32 bmcr;
7044         int r;
7045   
7046         if (!netif_running(dev))
7047                 return -EAGAIN;
7048
7049         spin_lock_irq(&tp->lock);
7050         r = -EINVAL;
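        /* Note: the result of the first read is discarded; the checked
         * read below fetches the value actually tested.
         */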
7051         tg3_readphy(tp, MII_BMCR, &bmcr);
7052         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7053             (bmcr & BMCR_ANENABLE)) {
7054                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7055                 r = 0;
7056         }
7057         spin_unlock_irq(&tp->lock);
7058   
7059         return r;
7060 }
7061   
7062 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7063 {
7064         struct tg3 *tp = netdev_priv(dev);
7065   
7066         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7067         ering->rx_mini_max_pending = 0;
7068         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7069
7070         ering->rx_pending = tp->rx_pending;
7071         ering->rx_mini_pending = 0;
7072         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7073         ering->tx_pending = tp->tx_pending;
7074 }
7075   
7076 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7077 {
7078         struct tg3 *tp = netdev_priv(dev);
7079   
7080         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7081             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7082             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7083                 return -EINVAL;
7084   
7085         if (netif_running(dev))
7086                 tg3_netif_stop(tp);
7087
7088         spin_lock_irq(&tp->lock);
7089         spin_lock(&tp->tx_lock);
7090   
7091         tp->rx_pending = ering->rx_pending;
7092
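        /* Chips flagged TG3_FLG2_MAX_RXPEND_64 cannot post more than 63
         * standard RX descriptors, so clamp the request.
         */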
7093         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7094             tp->rx_pending > 63)
7095                 tp->rx_pending = 63;
7096         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7097         tp->tx_pending = ering->tx_pending;
7098
7099         if (netif_running(dev)) {
7100                 tg3_halt(tp, 1);
7101                 tg3_init_hw(tp);
7102                 tg3_netif_start(tp);
7103         }
7104
7105         spin_unlock(&tp->tx_lock);
7106         spin_unlock_irq(&tp->lock);
7107   
7108         return 0;
7109 }
7110   
7111 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7112 {
7113         struct tg3 *tp = netdev_priv(dev);
7114   
7115         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7116         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7117         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7118 }
7119   
7120 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7121 {
7122         struct tg3 *tp = netdev_priv(dev);
7123   
7124         if (netif_running(dev))
7125                 tg3_netif_stop(tp);
7126
7127         spin_lock_irq(&tp->lock);
7128         spin_lock(&tp->tx_lock);
7129         if (epause->autoneg)
7130                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7131         else
7132                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7133         if (epause->rx_pause)
7134                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7135         else
7136                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7137         if (epause->tx_pause)
7138                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7139         else
7140                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7141
7142         if (netif_running(dev)) {
7143                 tg3_halt(tp, 1);
7144                 tg3_init_hw(tp);
7145                 tg3_netif_start(tp);
7146         }
7147         spin_unlock(&tp->tx_lock);
7148         spin_unlock_irq(&tp->lock);
7149   
7150         return 0;
7151 }
7152   
7153 static u32 tg3_get_rx_csum(struct net_device *dev)
7154 {
7155         struct tg3 *tp = netdev_priv(dev);
7156         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7157 }
7158   
7159 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7160 {
7161         struct tg3 *tp = netdev_priv(dev);
7162   
7163         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7164                 if (data != 0)
7165                         return -EINVAL;
7166                 return 0;
7167         }
7168   
7169         spin_lock_irq(&tp->lock);
7170         if (data)
7171                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7172         else
7173                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7174         spin_unlock_irq(&tp->lock);
7175   
7176         return 0;
7177 }
7178   
7179 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7180 {
7181         struct tg3 *tp = netdev_priv(dev);
7182   
7183         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7184                 if (data != 0)
7185                         return -EINVAL;
7186                 return 0;
7187         }
7188   
7189         if (data)
7190                 dev->features |= NETIF_F_IP_CSUM;
7191         else
7192                 dev->features &= ~NETIF_F_IP_CSUM;
7193
7194         return 0;
7195 }
7196
7197 static int tg3_get_stats_count (struct net_device *dev)
7198 {
7199         return TG3_NUM_STATS;
7200 }
7201
7202 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7203 {
7204         switch (stringset) {
7205         case ETH_SS_STATS:
7206                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7207                 break;
7208         default:
7209                 WARN_ON(1);     /* we need a WARN() */
7210                 break;
7211         }
7212 }
7213
7214 static void tg3_get_ethtool_stats (struct net_device *dev,
7215                                    struct ethtool_stats *estats, u64 *tmp_stats)
7216 {
7217         struct tg3 *tp = netdev_priv(dev);
7218         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7219 }
7220
7221 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7222 {
7223         struct mii_ioctl_data *data = if_mii(ifr);
7224         struct tg3 *tp = netdev_priv(dev);
7225         int err;
7226
7227         switch(cmd) {
7228         case SIOCGMIIPHY:
7229                 data->phy_id = PHY_ADDR;
7230
7231                 /* fallthru */
7232         case SIOCGMIIREG: {
7233                 u32 mii_regval;
7234
7235                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7236                         break;                  /* We have no PHY */
7237
7238                 spin_lock_irq(&tp->lock);
7239                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7240                 spin_unlock_irq(&tp->lock);
7241
7242                 data->val_out = mii_regval;
7243
7244                 return err;
7245         }
7246
7247         case SIOCSMIIREG:
7248                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7249                         break;                  /* We have no PHY */
7250
7251                 if (!capable(CAP_NET_ADMIN))
7252                         return -EPERM;
7253
7254                 spin_lock_irq(&tp->lock);
7255                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7256                 spin_unlock_irq(&tp->lock);
7257
7258                 return err;
7259
7260         default:
7261                 /* do nothing */
7262                 break;
7263         }
7264         return -EOPNOTSUPP;
7265 }
7266
7267 #if TG3_VLAN_TAG_USED
7268 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7269 {
7270         struct tg3 *tp = netdev_priv(dev);
7271
7272         spin_lock_irq(&tp->lock);
7273         spin_lock(&tp->tx_lock);
7274
7275         tp->vlgrp = grp;
7276
7277         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7278         __tg3_set_rx_mode(dev);
7279
7280         spin_unlock(&tp->tx_lock);
7281         spin_unlock_irq(&tp->lock);
7282 }
7283
7284 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7285 {
7286         struct tg3 *tp = netdev_priv(dev);
7287
7288         spin_lock_irq(&tp->lock);
7289         spin_lock(&tp->tx_lock);
7290         if (tp->vlgrp)
7291                 tp->vlgrp->vlan_devices[vid] = NULL;
7292         spin_unlock(&tp->tx_lock);
7293         spin_unlock_irq(&tp->lock);
7294 }
7295 #endif
7296
7297 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7298 {
7299         struct tg3 *tp = netdev_priv(dev);
7300
7301         memcpy(ec, &tp->coal, sizeof(*ec));
7302         return 0;
7303 }
7304
7305 static struct ethtool_ops tg3_ethtool_ops = {
7306         .get_settings           = tg3_get_settings,
7307         .set_settings           = tg3_set_settings,
7308         .get_drvinfo            = tg3_get_drvinfo,
7309         .get_regs_len           = tg3_get_regs_len,
7310         .get_regs               = tg3_get_regs,
7311         .get_wol                = tg3_get_wol,
7312         .set_wol                = tg3_set_wol,
7313         .get_msglevel           = tg3_get_msglevel,
7314         .set_msglevel           = tg3_set_msglevel,
7315         .nway_reset             = tg3_nway_reset,
7316         .get_link               = ethtool_op_get_link,
7317         .get_eeprom_len         = tg3_get_eeprom_len,
7318         .get_eeprom             = tg3_get_eeprom,
7319         .set_eeprom             = tg3_set_eeprom,
7320         .get_ringparam          = tg3_get_ringparam,
7321         .set_ringparam          = tg3_set_ringparam,
7322         .get_pauseparam         = tg3_get_pauseparam,
7323         .set_pauseparam         = tg3_set_pauseparam,
7324         .get_rx_csum            = tg3_get_rx_csum,
7325         .set_rx_csum            = tg3_set_rx_csum,
7326         .get_tx_csum            = ethtool_op_get_tx_csum,
7327         .set_tx_csum            = tg3_set_tx_csum,
7328         .get_sg                 = ethtool_op_get_sg,
7329         .set_sg                 = ethtool_op_set_sg,
7330 #if TG3_TSO_SUPPORT != 0
7331         .get_tso                = ethtool_op_get_tso,
7332         .set_tso                = tg3_set_tso,
7333 #endif
7334         .get_strings            = tg3_get_strings,
7335         .get_stats_count        = tg3_get_stats_count,
7336         .get_ethtool_stats      = tg3_get_ethtool_stats,
7337         .get_coalesce           = tg3_get_coalesce,
7338 };
7339
7340 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7341 {
7342         u32 cursize, val;
7343
7344         tp->nvram_size = EEPROM_CHIP_SIZE;
7345
7346         if (tg3_nvram_read(tp, 0, &val) != 0)
7347                 return;
7348
7349         if (swab32(val) != TG3_EEPROM_MAGIC)
7350                 return;
7351
7352         /*
7353          * Size the chip by reading offsets at increasing powers of two.
7354          * When we encounter our validation signature, we know the addressing
7355          * has wrapped around, and thus have our chip size.
7356          */
7357         cursize = 0x800;
7358
7359         while (cursize < tp->nvram_size) {
7360                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7361                         return;
7362
7363                 if (swab32(val) == TG3_EEPROM_MAGIC)
7364                         break;
7365
7366                 cursize <<= 1;
7367         }
7368
7369         tp->nvram_size = cursize;
7370 }
7371                 
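/* NVRAM images store their size (in KB) in the upper 16 bits of the
 * word at offset 0xf0; fall back to 128KB if that word is zero or
 * cannot be read.
 */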
7372 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7373 {
7374         u32 val;
7375
7376         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7377                 if (val != 0) {
7378                         tp->nvram_size = (val >> 16) * 1024;
7379                         return;
7380                 }
7381         }
7382         tp->nvram_size = 0x20000;
7383 }
7384
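/* Decode NVRAM_CFG1 to work out which flash or EEPROM part is attached
 * and what its page size is.
 */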
7385 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7386 {
7387         u32 nvcfg1;
7388
7389         nvcfg1 = tr32(NVRAM_CFG1);
7390         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7391                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7392         }
7393         else {
7394                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7395                 tw32(NVRAM_CFG1, nvcfg1);
7396         }
7397
7398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7399                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7400                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7401                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7402                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7403                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7404                                 break;
7405                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7406                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7407                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7408                                 break;
7409                         case FLASH_VENDOR_ATMEL_EEPROM:
7410                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7411                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7412                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7413                                 break;
7414                         case FLASH_VENDOR_ST:
7415                                 tp->nvram_jedecnum = JEDEC_ST;
7416                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7417                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7418                                 break;
7419                         case FLASH_VENDOR_SAIFUN:
7420                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7421                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7422                                 break;
7423                         case FLASH_VENDOR_SST_SMALL:
7424                         case FLASH_VENDOR_SST_LARGE:
7425                                 tp->nvram_jedecnum = JEDEC_SST;
7426                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7427                                 break;
7428                 }
7429         }
7430         else {
7431                 tp->nvram_jedecnum = JEDEC_ATMEL;
7432                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7433                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7434         }
7435 }
7436
7437 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7438 {
7439         u32 nvcfg1;
7440
7441         nvcfg1 = tr32(NVRAM_CFG1);
7442
7443         /* NVRAM protection for TPM */
7444         if (nvcfg1 & (1 << 27))
7445                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7446
7447         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7448                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7449                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7450                         tp->nvram_jedecnum = JEDEC_ATMEL;
7451                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7452                         break;
7453                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7454                         tp->nvram_jedecnum = JEDEC_ATMEL;
7455                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7456                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7457                         break;
7458                 case FLASH_5752VENDOR_ST_M45PE10:
7459                 case FLASH_5752VENDOR_ST_M45PE20:
7460                 case FLASH_5752VENDOR_ST_M45PE40:
7461                         tp->nvram_jedecnum = JEDEC_ST;
7462                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7463                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7464                         break;
7465         }
7466
7467         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7468                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7469                         case FLASH_5752PAGE_SIZE_256:
7470                                 tp->nvram_pagesize = 256;
7471                                 break;
7472                         case FLASH_5752PAGE_SIZE_512:
7473                                 tp->nvram_pagesize = 512;
7474                                 break;
7475                         case FLASH_5752PAGE_SIZE_1K:
7476                                 tp->nvram_pagesize = 1024;
7477                                 break;
7478                         case FLASH_5752PAGE_SIZE_2K:
7479                                 tp->nvram_pagesize = 2048;
7480                                 break;
7481                         case FLASH_5752PAGE_SIZE_4K:
7482                                 tp->nvram_pagesize = 4096;
7483                                 break;
7484                         case FLASH_5752PAGE_SIZE_264:
7485                                 tp->nvram_pagesize = 264;
7486                                 break;
7487                 }
7488         }
7489         else {
7490                 /* For eeprom, set pagesize to maximum eeprom size */
7491                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7492
7493                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7494                 tw32(NVRAM_CFG1, nvcfg1);
7495         }
7496 }
7497
7498 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7499 static void __devinit tg3_nvram_init(struct tg3 *tp)
7500 {
7501         int j;
7502
7503         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7504                 return;
7505
7506         tw32_f(GRC_EEPROM_ADDR,
7507              (EEPROM_ADDR_FSM_RESET |
7508               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7509                EEPROM_ADDR_CLKPERD_SHIFT)));
7510
7511         /* XXX schedule_timeout() ... */
7512         for (j = 0; j < 100; j++)
7513                 udelay(10);
7514
7515         /* Enable seeprom accesses. */
7516         tw32_f(GRC_LOCAL_CTRL,
7517              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7518         udelay(100);
7519
7520         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7521             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7522                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7523
7524                 tg3_enable_nvram_access(tp);
7525
7526                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7527                         tg3_get_5752_nvram_info(tp);
7528                 else
7529                         tg3_get_nvram_info(tp);
7530
7531                 tg3_get_nvram_size(tp);
7532
7533                 tg3_disable_nvram_access(tp);
7534
7535         } else {
7536                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7537
7538                 tg3_get_eeprom_size(tp);
7539         }
7540 }
7541
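/* Read one 32-bit word through the legacy serial-EEPROM interface:
 * program the address into GRC_EEPROM_ADDR, start the read and poll
 * EEPROM_ADDR_COMPLETE (up to roughly one second) for completion.
 */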
7542 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7543                                         u32 offset, u32 *val)
7544 {
7545         u32 tmp;
7546         int i;
7547
7548         if (offset > EEPROM_ADDR_ADDR_MASK ||
7549             (offset % 4) != 0)
7550                 return -EINVAL;
7551
7552         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7553                                         EEPROM_ADDR_DEVID_MASK |
7554                                         EEPROM_ADDR_READ);
7555         tw32(GRC_EEPROM_ADDR,
7556              tmp |
7557              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7558              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7559               EEPROM_ADDR_ADDR_MASK) |
7560              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7561
7562         for (i = 0; i < 10000; i++) {
7563                 tmp = tr32(GRC_EEPROM_ADDR);
7564
7565                 if (tmp & EEPROM_ADDR_COMPLETE)
7566                         break;
7567                 udelay(100);
7568         }
7569         if (!(tmp & EEPROM_ADDR_COMPLETE))
7570                 return -EBUSY;
7571
7572         *val = tr32(GRC_EEPROM_DATA);
7573         return 0;
7574 }
7575
7576 #define NVRAM_CMD_TIMEOUT 10000
7577
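/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, waiting at most
 * NVRAM_CMD_TIMEOUT * 10 usec (about 100 ms) before giving up.
 */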
7578 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7579 {
7580         int i;
7581
7582         tw32(NVRAM_CMD, nvram_cmd);
7583         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7584                 udelay(10);
7585                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7586                         udelay(10);
7587                         break;
7588                 }
7589         }
7590         if (i == NVRAM_CMD_TIMEOUT) {
7591                 return -EBUSY;
7592         }
7593         return 0;
7594 }
7595
7596 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7597 {
7598         int ret;
7599
7600         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7601                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7602                 return -EINVAL;
7603         }
7604
7605         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7606                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7607
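        /* Buffered Atmel (AT45DB) flash is addressed by page number plus
         * byte-within-page rather than linearly, so split the offset into
         * those two fields here.
         */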
7608         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7609                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7610                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7611
7612                 offset = ((offset / tp->nvram_pagesize) <<
7613                           ATMEL_AT45DB0X1B_PAGE_POS) +
7614                         (offset % tp->nvram_pagesize);
7615         }
7616
7617         if (offset > NVRAM_ADDR_MSK)
7618                 return -EINVAL;
7619
7620         tg3_nvram_lock(tp);
7621
7622         tg3_enable_nvram_access(tp);
7623
7624         tw32(NVRAM_ADDR, offset);
7625         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7626                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7627
7628         if (ret == 0)
7629                 *val = swab32(tr32(NVRAM_RDDATA));
7630
7631         tg3_nvram_unlock(tp);
7632
7633         tg3_disable_nvram_access(tp);
7634
7635         return ret;
7636 }
7637
7638 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7639                                     u32 offset, u32 len, u8 *buf)
7640 {
7641         int i, j, rc = 0;
7642         u32 val;
7643
7644         for (i = 0; i < len; i += 4) {
7645                 u32 addr, data;
7646
7647                 addr = offset + i;
7648
7649                 memcpy(&data, buf + i, 4);
7650
7651                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7652
7653                 val = tr32(GRC_EEPROM_ADDR);
7654                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7655
7656                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7657                         EEPROM_ADDR_READ);
7658                 tw32(GRC_EEPROM_ADDR, val |
7659                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7660                         (addr & EEPROM_ADDR_ADDR_MASK) |
7661                         EEPROM_ADDR_START |
7662                         EEPROM_ADDR_WRITE);
7663                 
7664                 for (j = 0; j < 10000; j++) {
7665                         val = tr32(GRC_EEPROM_ADDR);
7666
7667                         if (val & EEPROM_ADDR_COMPLETE)
7668                                 break;
7669                         udelay(100);
7670                 }
7671                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7672                         rc = -EBUSY;
7673                         break;
7674                 }
7675         }
7676
7677         return rc;
7678 }
7679
7680 /* offset and length are dword aligned */
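/* Unbuffered flash must be rewritten one full page at a time: read the
 * page into a bounce buffer, merge in the new data, then write-enable,
 * erase and reprogram the page word by word.
 */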
7681 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7682                 u8 *buf)
7683 {
7684         int ret = 0;
7685         u32 pagesize = tp->nvram_pagesize;
7686         u32 pagemask = pagesize - 1;
7687         u32 nvram_cmd;
7688         u8 *tmp;
7689
7690         tmp = kmalloc(pagesize, GFP_KERNEL);
7691         if (tmp == NULL)
7692                 return -ENOMEM;
7693
7694         while (len) {
7695                 int j;
7696                 u32 phy_addr, page_off, size;
7697
7698                 phy_addr = offset & ~pagemask;
7699         
7700                 for (j = 0; j < pagesize; j += 4) {
7701                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7702                                                 (u32 *) (tmp + j))))
7703                                 break;
7704                 }
7705                 if (ret)
7706                         break;
7707
7708                 page_off = offset & pagemask;
7709                 size = pagesize;
7710                 if (len < size)
7711                         size = len;
7712
7713                 len -= size;
7714
7715                 memcpy(tmp + page_off, buf, size);
7716
7717                 offset = offset + (pagesize - page_off);
7718
7719                 tg3_enable_nvram_access(tp);
7720
7721                 /*
7722                  * Before we can erase the flash page, we need
7723                  * to issue a special "write enable" command.
7724                  */
7725                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7726
7727                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7728                         break;
7729
7730                 /* Erase the target page */
7731                 tw32(NVRAM_ADDR, phy_addr);
7732
7733                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7734                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7735
7736                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7737                         break;
7738
7739                 /* Issue another write enable to start the write. */
7740                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7741
7742                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7743                         break;
7744
7745                 for (j = 0; j < pagesize; j += 4) {
7746                         u32 data;
7747
7748                         data = *((u32 *) (tmp + j));
7749                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7750
7751                         tw32(NVRAM_ADDR, phy_addr + j);
7752
7753                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7754                                 NVRAM_CMD_WR;
7755
7756                         if (j == 0)
7757                                 nvram_cmd |= NVRAM_CMD_FIRST;
7758                         else if (j == (pagesize - 4))
7759                                 nvram_cmd |= NVRAM_CMD_LAST;
7760
7761                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7762                                 break;
7763                 }
7764                 if (ret)
7765                         break;
7766         }
7767
7768         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7769         tg3_nvram_exec_cmd(tp, nvram_cmd);
7770
7771         kfree(tmp);
7772
7773         return ret;
7774 }
7775
7776 /* offset and length are dword aligned */
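/* Buffered flash and EEPROM take the data a word at a time.  The
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST flags mark page (or transfer)
 * boundaries; plain EEPROMs get both flags on every word since they
 * only do complete word writes.
 */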
7777 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7778                 u8 *buf)
7779 {
7780         int i, ret = 0;
7781
7782         for (i = 0; i < len; i += 4, offset += 4) {
7783                 u32 data, page_off, phy_addr, nvram_cmd;
7784
7785                 memcpy(&data, buf + i, 4);
7786                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7787
7788                 page_off = offset % tp->nvram_pagesize;
7789
7790                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7791                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7792
7793                         phy_addr = ((offset / tp->nvram_pagesize) <<
7794                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7795                 }
7796                 else {
7797                         phy_addr = offset;
7798                 }
7799
7800                 tw32(NVRAM_ADDR, phy_addr);
7801
7802                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7803
7804                 if ((page_off == 0) || (i == 0))
7805                         nvram_cmd |= NVRAM_CMD_FIRST;
7806                 else if (page_off == (tp->nvram_pagesize - 4))
7807                         nvram_cmd |= NVRAM_CMD_LAST;
7808
7809                 if (i == (len - 4))
7810                         nvram_cmd |= NVRAM_CMD_LAST;
7811
7812                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7813                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7814
7815                         if ((ret = tg3_nvram_exec_cmd(tp,
7816                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7817                                 NVRAM_CMD_DONE)))
7818
7819                                 break;
7820                 }
7821                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7822                         /* We always do complete word writes to eeprom. */
7823                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7824                 }
7825
7826                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7827                         break;
7828         }
7829         return ret;
7830 }
7831
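/* Top-level NVRAM write entry point.  Temporarily drops the GPIO1
 * based eeprom write protect when it is in use, then dispatches to the
 * eeprom path or, with the NVRAM lock and GRC write-enable held, to
 * the buffered or unbuffered flash path.
 */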
7832 /* offset and length are dword aligned */
7833 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7834 {
7835         int ret;
7836
7837         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7838                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7839                 return -EINVAL;
7840         }
7841
7842         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7843                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7844                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7845                 udelay(40);
7846         }
7847
7848         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7849                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7850         }
7851         else {
7852                 u32 grc_mode;
7853
7854                 tg3_nvram_lock(tp);
7855
7856                 tg3_enable_nvram_access(tp);
7857                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7858                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
7859                         tw32(NVRAM_WRITE1, 0x406);
7860
7861                 grc_mode = tr32(GRC_MODE);
7862                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7863
7864                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7865                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7866
7867                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7868                                 buf);
7869                 }
7870                 else {
7871                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7872                                 buf);
7873                 }
7874
7875                 grc_mode = tr32(GRC_MODE);
7876                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7877
7878                 tg3_disable_nvram_access(tp);
7879                 tg3_nvram_unlock(tp);
7880         }
7881
7882         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7883                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7884                 udelay(40);
7885         }
7886
7887         return ret;
7888 }
7889
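/* Fallback table mapping PCI subsystem IDs to PHY IDs for boards whose
 * NVRAM carries no valid signature.  Entries with a phy_id of zero are
 * apparently fiber boards; tg3_phy_probe() treats them as SERDES.
 */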
7890 struct subsys_tbl_ent {
7891         u16 subsys_vendor, subsys_devid;
7892         u32 phy_id;
7893 };
7894
7895 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7896         /* Broadcom boards. */
7897         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7898         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7899         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7900         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7901         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7902         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7903         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7904         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7905         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7906         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7907         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7908
7909         /* 3com boards. */
7910         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7911         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7912         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7913         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7914         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7915
7916         /* DELL boards. */
7917         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7918         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7919         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7920         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7921
7922         /* Compaq boards. */
7923         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7924         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7925         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7926         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7927         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7928
7929         /* IBM boards. */
7930         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7931 };
7932
7933 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7934 {
7935         int i;
7936
7937         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7938                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7939                      tp->pdev->subsystem_vendor) &&
7940                     (subsys_id_to_phy_id[i].subsys_devid ==
7941                      tp->pdev->subsystem_device))
7942                         return &subsys_id_to_phy_id[i];
7943         }
7944         return NULL;
7945 }
7946
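/* Read back the configuration the bootcode left in NIC SRAM: PHY ID,
 * fiber vs. copper, LED mode, ASF enable, WOL capability and the
 * eeprom write-protect bit.  If the SRAM signature is missing, the
 * defaults set at the top of the function are kept.
 */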
7947 /* Since this function may be called in D3-hot power state during
7948  * tg3_init_one(), only config cycles are allowed.
7949  */
7950 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7951 {
7952         u32 val;
7953
7954         /* Make sure register accesses (indirect or otherwise)
7955          * will function correctly.
7956          */
7957         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7958                                tp->misc_host_ctrl);
7959
7960         tp->phy_id = PHY_ID_INVALID;
7961         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7962
7963         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7964         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7965                 u32 nic_cfg, led_cfg;
7966                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7967                 int eeprom_phy_serdes = 0;
7968
7969                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7970                 tp->nic_sram_data_cfg = nic_cfg;
7971
7972                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7973                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7974                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7975                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7976                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7977                     (ver > 0) && (ver < 0x100))
7978                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7979
7980                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7981                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7982                         eeprom_phy_serdes = 1;
7983
7984                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7985                 if (nic_phy_id != 0) {
7986                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7987                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7988
7989                         eeprom_phy_id  = (id1 >> 16) << 10;
7990                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7991                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7992                 } else
7993                         eeprom_phy_id = 0;
7994
7995                 tp->phy_id = eeprom_phy_id;
7996                 if (eeprom_phy_serdes)
7997                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7998
7999                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8000                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8001                                     SHASTA_EXT_LED_MODE_MASK);
8002                 else
8003                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8004
8005                 switch (led_cfg) {
8006                 default:
8007                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8008                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8009                         break;
8010
8011                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8012                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8013                         break;
8014
8015                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8016                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8017                         break;
8018
8019                 case SHASTA_EXT_LED_SHARED:
8020                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8021                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8022                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8023                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8024                                                  LED_CTRL_MODE_PHY_2);
8025                         break;
8026
8027                 case SHASTA_EXT_LED_MAC:
8028                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8029                         break;
8030
8031                 case SHASTA_EXT_LED_COMBO:
8032                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8033                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8034                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8035                                                  LED_CTRL_MODE_PHY_2);
8036                         break;
8037
8038                 }
8039
8040                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8041                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8042                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8043                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8044
8045                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8046                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8047                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8048                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8049
8050                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8051                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8052                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8053                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8054                 }
8055                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8056                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8057
8058                 if (cfg2 & (1 << 17))
8059                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8060
8061                 /* serdes signal pre-emphasis in register 0x590 is set by
8062                  * the bootcode if bit 18 is set */
8063                 if (cfg2 & (1 << 18))
8064                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8065         }
8066 }
8067
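/* Determine the PHY ID (MII registers first, then the eeprom value,
 * then the subsystem ID table) and, for copper PHYs without ASF,
 * reset the PHY and make sure a full 10/100(/1000) autoneg
 * advertisement is programmed.
 */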
8068 static int __devinit tg3_phy_probe(struct tg3 *tp)
8069 {
8070         u32 hw_phy_id_1, hw_phy_id_2;
8071         u32 hw_phy_id, hw_phy_id_masked;
8072         int err;
8073
8074         /* Reading the PHY ID register can conflict with ASF
8075  * firmware access to the PHY hardware.
8076          */
8077         err = 0;
8078         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8079                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8080         } else {
8081                 /* Now read the physical PHY_ID from the chip and verify
8082                  * that it is sane.  If it doesn't look good, we fall back
8083                  * to the PHY_ID found in the eeprom area or, failing
8084                  * that, the hard-coded subsystem ID table.
8085                  */
8086                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8087                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8088
8089                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8090                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8091                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8092
8093                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8094         }
8095
8096         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8097                 tp->phy_id = hw_phy_id;
8098                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8099                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8100         } else {
8101                 if (tp->phy_id != PHY_ID_INVALID) {
8102                         /* Do nothing, phy ID already set up in
8103                          * tg3_get_eeprom_hw_cfg().
8104                          */
8105                 } else {
8106                         struct subsys_tbl_ent *p;
8107
8108                         /* No eeprom signature?  Try the hardcoded
8109                          * subsys device table.
8110                          */
8111                         p = lookup_by_subsys(tp);
8112                         if (!p)
8113                                 return -ENODEV;
8114
8115                         tp->phy_id = p->phy_id;
8116                         if (!tp->phy_id ||
8117                             tp->phy_id == PHY_ID_BCM8002)
8118                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8119                 }
8120         }
8121
8122         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8123             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8124                 u32 bmsr, adv_reg, tg3_ctrl;
8125
8126                 tg3_readphy(tp, MII_BMSR, &bmsr);
8127                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8128                     (bmsr & BMSR_LSTATUS))
8129                         goto skip_phy_reset;
8130                     
8131                 err = tg3_phy_reset(tp);
8132                 if (err)
8133                         return err;
8134
8135                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8136                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8137                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8138                 tg3_ctrl = 0;
8139                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8140                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8141                                     MII_TG3_CTRL_ADV_1000_FULL);
8142                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8143                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8144                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8145                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8146                 }
8147
8148                 if (!tg3_copper_is_advertising_all(tp)) {
8149                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8150
8151                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8152                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8153
8154                         tg3_writephy(tp, MII_BMCR,
8155                                      BMCR_ANENABLE | BMCR_ANRESTART);
8156                 }
8157                 tg3_phy_set_wirespeed(tp);
8158
8159                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8160                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8161                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8162         }
8163
8164 skip_phy_reset:
8165         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8166                 err = tg3_init_5401phy_dsp(tp);
8167                 if (err)
8168                         return err;
8169         }
8170
8171         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8172                 err = tg3_init_5401phy_dsp(tp);
8173         }
8174
8175         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8176                 tp->link_config.advertising =
8177                         (ADVERTISED_1000baseT_Half |
8178                          ADVERTISED_1000baseT_Full |
8179                          ADVERTISED_Autoneg |
8180                          ADVERTISED_FIBRE);
8181         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8182                 tp->link_config.advertising &=
8183                         ~(ADVERTISED_1000baseT_Half |
8184                           ADVERTISED_1000baseT_Full);
8185
8186         return err;
8187 }
8188
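/* Extract the board part number from the VPD area in NVRAM (256 bytes
 * at offset 0x100).  Identifier-string (0x82) and read-write (0x91)
 * VPD descriptors are skipped; the read-only (0x90) block is searched
 * for the 'PN' keyword, which is copied out if it is at most 24 bytes.
 */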
8189 static void __devinit tg3_read_partno(struct tg3 *tp)
8190 {
8191         unsigned char vpd_data[256];
8192         int i;
8193
8194         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8195                 /* Sun decided not to put the necessary bits in the
8196                  * NVRAM of their onboard tg3 parts :(
8197                  */
8198                 strcpy(tp->board_part_number, "Sun 570X");
8199                 return;
8200         }
8201
8202         for (i = 0; i < 256; i += 4) {
8203                 u32 tmp;
8204
8205                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8206                         goto out_not_found;
8207
8208                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8209                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8210                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8211                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8212         }
8213
8214         /* Now parse and find the part number. */
8215         for (i = 0; i < 256; ) {
8216                 unsigned char val = vpd_data[i];
8217                 int block_end;
8218
8219                 if (val == 0x82 || val == 0x91) {
8220                         i = (i + 3 +
8221                              (vpd_data[i + 1] +
8222                               (vpd_data[i + 2] << 8)));
8223                         continue;
8224                 }
8225
8226                 if (val != 0x90)
8227                         goto out_not_found;
8228
8229                 block_end = (i + 3 +
8230                              (vpd_data[i + 1] +
8231                               (vpd_data[i + 2] << 8)));
8232                 i += 3;
8233                 while (i < block_end) {
8234                         if (vpd_data[i + 0] == 'P' &&
8235                             vpd_data[i + 1] == 'N') {
8236                                 int partno_len = vpd_data[i + 2];
8237
8238                                 if (partno_len > 24)
8239                                         goto out_not_found;
8240
8241                                 memcpy(tp->board_part_number,
8242                                        &vpd_data[i + 3],
8243                                        partno_len);
8244
8245                                 /* Success. */
8246                                 return;
8247                         }
8248                         }
                        /* Advance to the next VPD keyword entry
                         * (2-byte keyword, 1-byte length, data);
                         * without this the loop never terminates
                         * when 'PN' is not the first keyword.
                         */
                        i += 3 + vpd_data[i + 2];
8249                 }
8250                 /* Part number not found. */
8251                 goto out_not_found;
8252         }
8253
8254 out_not_found:
8255         strcpy(tp->board_part_number, "none");
8256 }
8257
8258 #ifdef CONFIG_SPARC64
8259 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8260 {
8261         struct pci_dev *pdev = tp->pdev;
8262         struct pcidev_cookie *pcp = pdev->sysdata;
8263
8264         if (pcp != NULL) {
8265                 int node = pcp->prom_node;
8266                 u32 venid;
8267                 int err;
8268
8269                 err = prom_getproperty(node, "subsystem-vendor-id",
8270                                        (char *) &venid, sizeof(venid));
8271                 if (err == 0 || err == -1)
8272                         return 0;
8273                 if (venid == PCI_VENDOR_ID_SUN)
8274                         return 1;
8275         }
8276         return 0;
8277 }
8278 #endif
8279
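/* One-time probe of everything that does not change at runtime: chip
 * revision and bus type, host chipset workarounds, the eeprom hardware
 * config, the PHY and board part number, and the TG3_FLAG/TG3_FLG2
 * feature and bug-workaround bits derived from them.
 */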
8280 static int __devinit tg3_get_invariants(struct tg3 *tp)
8281 {
8282         static struct pci_device_id write_reorder_chipsets[] = {
8283                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8284                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8285                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8286                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8287                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8288                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8289                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8290                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8291                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8292                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8293                 { },
8294         };
8295         u32 misc_ctrl_reg;
8296         u32 cacheline_sz_reg;
8297         u32 pci_state_reg, grc_misc_cfg;
8298         u32 val;
8299         u16 pci_cmd;
8300         int err;
8301
8302 #ifdef CONFIG_SPARC64
8303         if (tg3_is_sun_570X(tp))
8304                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8305 #endif
8306
8307         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8308          * reordering to the mailbox registers done by the host
8309          * controller can cause major troubles.  We read back from
8310          * every mailbox register write to force the writes to be
8311          * posted to the chip in order.
8312          */
8313         if (pci_dev_present(write_reorder_chipsets))
8314                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8315
8316         /* Force memory write invalidate off.  If we leave it on,
8317          * then on 5700_BX chips we have to enable a workaround.
8318          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8319          * to match the cacheline size.  The Broadcom driver has this
8320          * workaround but turns MWI off at all times and so never uses
8321          * it.  This seems to suggest that the workaround is insufficient.
8322          */
8323         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8324         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8325         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8326
8327         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8328          * has the register indirect write enable bit set before
8329          * we try to access any of the MMIO registers.  It is also
8330          * critical that the PCI-X hw workaround situation is decided
8331          * before that as well.
8332          */
8333         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8334                               &misc_ctrl_reg);
8335
8336         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8337                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8338
8339         /* Wrong chip ID in 5752 A0. This code can be removed later
8340          * as A0 is not in production.
8341          */
8342         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8343                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8344
8345         /* Initialize misc host control in PCI block. */
8346         tp->misc_host_ctrl |= (misc_ctrl_reg &
8347                                MISC_HOST_CTRL_CHIPREV);
8348         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8349                                tp->misc_host_ctrl);
8350
8351         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8352                               &cacheline_sz_reg);
8353
8354         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8355         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8356         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8357         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8358
8359         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8360             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8361                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8362
8363         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8364             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8365                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8366
8367         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8368                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8369
8370         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8371                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8372
8373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8374             tp->pci_lat_timer < 64) {
8375                 tp->pci_lat_timer = 64;
8376
8377                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8378                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8379                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8380                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8381
8382                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8383                                        cacheline_sz_reg);
8384         }
8385
8386         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8387                               &pci_state_reg);
8388
8389         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8390                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8391
8392                 /* If this is a 5700 BX chipset, and we are in PCI-X
8393                  * mode, enable register write workaround.
8394                  *
8395                  * The workaround is to use indirect register accesses
8396                  * for all chip writes not to mailbox registers.
8397                  */
8398                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8399                         u32 pm_reg;
8400                         u16 pci_cmd;
8401
8402                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8403
8404                         /* The chip can have its power management PCI config
8405                          * space registers clobbered due to this bug.
8406                          * So explicitly force the chip into D0 here.
8407                          */
8408                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8409                                               &pm_reg);
8410                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8411                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8412                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8413                                                pm_reg);
8414
8415                         /* Also, force SERR#/PERR# in PCI command. */
8416                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8417                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8418                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8419                 }
8420         }
8421
8422         /* Back to back register writes can cause problems on this chip,
8423          * the workaround is to read back all reg writes except those to
8424          * mailbox regs.  See tg3_write_indirect_reg32().
8425          *
8426          * PCI Express 5750_A0 rev chips need this workaround too.
8427          */
8428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8429             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8430              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8431                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8432
8433         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8434                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8435         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8436                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8437
8438         /* Chip-specific fixup from Broadcom driver */
8439         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8440             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8441                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8442                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8443         }
8444
8445         /* Get eeprom hw config before calling tg3_set_power_state().
8446          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8447          * determined before calling tg3_set_power_state() so that
8448          * we know whether or not to switch out of Vaux power.
8449          * When the flag is set, it means that GPIO1 is used for eeprom
8450          * write protect and also implies that it is a LOM where GPIOs
8451          * are not used to switch power.
8452          */ 
8453         tg3_get_eeprom_hw_cfg(tp);
8454
8455         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8456          * GPIO1 driven high will bring 5700's external PHY out of reset.
8457          * It is also used as eeprom write protect on LOMs.
8458          */
8459         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8460         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8461             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8462                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8463                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8464         /* Unused GPIO3 must be driven as output on 5752 because there
8465          * are no pull-up resistors on unused GPIO pins.
8466          */
8467         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8468                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8469
8470         /* Force the chip into D0. */
8471         err = tg3_set_power_state(tp, 0);
8472         if (err) {
8473                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8474                        pci_name(tp->pdev));
8475                 return err;
8476         }
8477
8478         /* 5700 B0 chips do not support checksumming correctly due
8479          * to hardware bugs.
8480          */
8481         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8482                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8483
8484         /* Pseudo-header checksum is done by hardware logic and not
8485          * the offload processors, so make the chip do the pseudo-
8486          * header checksums on receive.  For transmit it is more
8487          * convenient to do the pseudo-header checksum in software
8488          * as Linux does that on transmit for us in all cases.
8489          */
8490         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8491         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8492
8493         /* Derive initial jumbo mode from MTU assigned in
8494          * ether_setup() via the alloc_etherdev() call
8495          */
8496         if (tp->dev->mtu > ETH_DATA_LEN)
8497                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8498
8499         /* Determine WakeOnLan speed to use. */
8500         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8501             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8502             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8503             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8504                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8505         } else {
8506                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8507         }
8508
8509         /* A few boards don't want Ethernet@WireSpeed phy feature */
8510         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8511             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8512              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8513              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8514                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8515
8516         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8517             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8518                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8519         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8520                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8521
8522         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8523                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8524
8525         tp->coalesce_mode = 0;
8526         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8527             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8528                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8529
8530         /* Initialize MAC MI mode, polling disabled. */
8531         tw32_f(MAC_MI_MODE, tp->mi_mode);
8532         udelay(80);
8533
8534         /* Initialize data/descriptor byte/word swapping. */
8535         val = tr32(GRC_MODE);
8536         val &= GRC_MODE_HOST_STACKUP;
8537         tw32(GRC_MODE, val | tp->grc_mode);
8538
8539         tg3_switch_clocks(tp);
8540
8541         /* Clear this out for sanity. */
8542         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8543
8544         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8545                               &pci_state_reg);
8546         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8547             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8548                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8549
8550                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8551                     chiprevid == CHIPREV_ID_5701_B0 ||
8552                     chiprevid == CHIPREV_ID_5701_B2 ||
8553                     chiprevid == CHIPREV_ID_5701_B5) {
8554                         void __iomem *sram_base;
8555
8556                         /* Write some dummy words into the SRAM status block
8557                          * area, see if it reads back correctly.  If the return
8558                          * value is bad, force enable the PCIX workaround.
8559                          */
8560                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8561
8562                         writel(0x00000000, sram_base);
8563                         writel(0x00000000, sram_base + 4);
8564                         writel(0xffffffff, sram_base + 4);
8565                         if (readl(sram_base) != 0x00000000)
8566                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8567                 }
8568         }
8569
8570         udelay(50);
8571         tg3_nvram_init(tp);
8572
8573         grc_misc_cfg = tr32(GRC_MISC_CFG);
8574         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8575
8576         /* Broadcom's driver says that CIOBE multisplit has a bug */
8577 #if 0
8578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8579             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8580                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8581                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8582         }
8583 #endif
8584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8585             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8586              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8587                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8588
8589         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8590             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
8591                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
8592         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
8593                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
8594                                       HOSTCC_MODE_CLRTICK_TXBD);
8595
8596                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
8597                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8598                                        tp->misc_host_ctrl);
8599         }
8600
8601         /* these are limited to 10/100 only */
8602         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8603              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8604             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8605              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8606              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8607               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8608               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8609             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8610              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8611               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8612                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8613
8614         err = tg3_phy_probe(tp);
8615         if (err) {
8616                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8617                        pci_name(tp->pdev), err);
8618                 /* ... but do not return immediately ... */
8619         }
8620
8621         tg3_read_partno(tp);
8622
8623         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8624                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8625         } else {
8626                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8627                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8628                 else
8629                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8630         }
8631
8632         /* 5700 {AX,BX} chips have a broken status block link
8633          * change bit implementation, so we must use the
8634          * status register in those cases.
8635          */
8636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8637                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8638         else
8639                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8640
8641         /* The led_ctrl is set during tg3_phy_probe, here we might
8642          * have to force the link status polling mechanism based
8643          * upon subsystem IDs.
8644          */
8645         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8646             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8647                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8648                                   TG3_FLAG_USE_LINKCHG_REG);
8649         }
8650
8651         /* For all SERDES we poll the MAC status register. */
8652         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8653                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8654         else
8655                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8656
8657         /* 5700 BX chips need to have their TX producer index mailboxes
8658          * written twice to workaround a bug.
8659          */
8660         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8661                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8662         else
8663                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8664
8665         /* It seems all chips can get confused if TX buffers
8666          * straddle the 4GB address boundary in some cases.
8667          */
8668         tp->dev->hard_start_xmit = tg3_start_xmit;
8669
8670         tp->rx_offset = 2;
8671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8672             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8673                 tp->rx_offset = 0;
8674
8675         /* By default, disable wake-on-lan.  User can change this
8676          * using ETHTOOL_SWOL.
8677          */
8678         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8679
8680         return err;
8681 }
8682
8683 #ifdef CONFIG_SPARC64
8684 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8685 {
8686         struct net_device *dev = tp->dev;
8687         struct pci_dev *pdev = tp->pdev;
8688         struct pcidev_cookie *pcp = pdev->sysdata;
8689
8690         if (pcp != NULL) {
8691                 int node = pcp->prom_node;
8692
8693                 if (prom_getproplen(node, "local-mac-address") == 6) {
8694                         prom_getproperty(node, "local-mac-address",
8695                                          dev->dev_addr, 6);
8696                         return 0;
8697                 }
8698         }
8699         return -ENODEV;
8700 }
8701
8702 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8703 {
8704         struct net_device *dev = tp->dev;
8705
8706         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8707         return 0;
8708 }
8709 #endif
8710
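/* MAC address lookup order: OBP property on sparc64, then the SRAM
 * mailbox left by the bootcode (the upper 16 bits 0x484b are ASCII
 * "HK"), then NVRAM at offset 0x7c (0xcc when a 5704 reports the
 * second MAC via DUAL_MAC_CTRL_ID), and finally whatever is already
 * latched in the MAC_ADDR_0 registers.
 */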
8711 static int __devinit tg3_get_device_address(struct tg3 *tp)
8712 {
8713         struct net_device *dev = tp->dev;
8714         u32 hi, lo, mac_offset;
8715
8716 #ifdef CONFIG_SPARC64
8717         if (!tg3_get_macaddr_sparc(tp))
8718                 return 0;
8719 #endif
8720
8721         mac_offset = 0x7c;
8722         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8723             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8724                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8725                         mac_offset = 0xcc;
8726                 if (tg3_nvram_lock(tp))
8727                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8728                 else
8729                         tg3_nvram_unlock(tp);
8730         }
8731
8732         /* First try to get it from MAC address mailbox. */
8733         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8734         if ((hi >> 16) == 0x484b) {
8735                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8736                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8737
8738                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8739                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8740                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8741                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8742                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8743         }
8744         /* Next, try NVRAM. */
8745         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8746                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8747                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8748                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8749                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8750                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8751                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8752                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8753                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8754         }
8755         /* Finally just fetch it out of the MAC control regs. */
8756         else {
8757                 hi = tr32(MAC_ADDR_0_HIGH);
8758                 lo = tr32(MAC_ADDR_0_LOW);
8759
8760                 dev->dev_addr[5] = lo & 0xff;
8761                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8762                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8763                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8764                 dev->dev_addr[1] = hi & 0xff;
8765                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8766         }
8767
8768         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8769 #ifdef CONFIG_SPARC64
8770                 if (!tg3_get_default_macaddr_sparc(tp))
8771                         return 0;
8772 #endif
8773                 return -EINVAL;
8774         }
8775         return 0;
8776 }
8777
8778 #define BOUNDARY_SINGLE_CACHELINE       1
8779 #define BOUNDARY_MULTI_CACHELINE        2
8780
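/* Choose DMA read/write boundary bits for DMA_RW_CTRL from the host
 * cache line size.  The boundary fields only matter on 5700/5701 and
 * (write side only) PCI Express chips; the per-architecture goal below
 * presumably reflects which host bridges disconnect bursts at cache
 * line boundaries.
 */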
8781 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
8782 {
8783         int cacheline_size;
8784         u8 byte;
8785         int goal;
8786
8787         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8788         if (byte == 0)
8789                 cacheline_size = 1024;
8790         else
8791                 cacheline_size = (int) byte * 4;
8792
8793         /* On 5703 and later chips, the boundary bits have no
8794          * effect.
8795          */
8796         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8797             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
8798             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8799                 goto out;
8800
8801 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
8802         goal = BOUNDARY_MULTI_CACHELINE;
8803 #else
8804 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
8805         goal = BOUNDARY_SINGLE_CACHELINE;
8806 #else
8807         goal = 0;
8808 #endif
8809 #endif
8810
8811         if (!goal)
8812                 goto out;
8813
8814         /* PCI controllers on most RISC systems tend to disconnect
8815          * when a device tries to burst across a cache-line boundary.
8816          * Therefore, letting tg3 do so just wastes PCI bandwidth.
8817          *
8818          * Unfortunately, for PCI-E there are only limited
8819          * write-side controls for this, and thus for reads
8820          * we will still get the disconnects.  We'll also waste
8821          * these PCI cycles for both read and write for chips
8822          * other than 5700 and 5701 which do not implement the
8823          * boundary bits.
8824          */
8825         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8826             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8827                 switch (cacheline_size) {
8828                 case 16:
8829                 case 32:
8830                 case 64:
8831                 case 128:
8832                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
8833                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
8834                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
8835                         } else {
8836                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
8837                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
8838                         }
8839                         break;
8840
8841                 case 256:
8842                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
8843                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
8844                         break;
8845
8846                 default:
8847                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
8848                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
8849                         break;
8850                 }
8851         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8852                 switch (cacheline_size) {
8853                 case 16:
8854                 case 32:
8855                 case 64:
8856                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
8857                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
8858                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
8859                                 break;
8860                         }
8861                         /* fallthrough */
8862                 case 128:
8863                 default:
8864                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
8865                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8866                         break;
8867                 }
8868         } else {
8869                 switch (cacheline_size) {
8870                 case 16:
8871                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
8872                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
8873                                         DMA_RWCTRL_WRITE_BNDRY_16);
8874                                 break;
8875                         }
8876                         /* fallthrough */
8877                 case 32:
8878                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
8879                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
8880                                         DMA_RWCTRL_WRITE_BNDRY_32);
8881                                 break;
8882                         }
8883                         /* fallthrough */
8884                 case 64:
8885                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
8886                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
8887                                         DMA_RWCTRL_WRITE_BNDRY_64);
8888                                 break;
8889                         }
8890                         /* fallthrough */
8891                 case 128:
8892                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
8893                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
8894                                         DMA_RWCTRL_WRITE_BNDRY_128);
8895                                 break;
8896                         }
8897                         /* fallthrough */
8898                 case 256:
8899                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
8900                                 DMA_RWCTRL_WRITE_BNDRY_256);
8901                         break;
8902                 case 512:
8903                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
8904                                 DMA_RWCTRL_WRITE_BNDRY_512);
8905                         break;
8906                 case 1024:
8907                 default:
8908                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
8909                                 DMA_RWCTRL_WRITE_BNDRY_1024);
8910                         break;
8911                 }
8912         }
8913
8914 out:
8915         return val;
8916 }
8917
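/* Run one raw DMA transfer of 'size' bytes between the host buffer and
 * NIC SRAM at 0x2100.  The internal buffer descriptor is written into
 * SRAM through the PCI config space memory window, the read or write
 * DMA engine is kicked via its FTQ, and completion is polled (up to
 * roughly 4ms) in the corresponding completion FIFO.
 */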
8918 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8919 {
8920         struct tg3_internal_buffer_desc test_desc;
8921         u32 sram_dma_descs;
8922         int i, ret;
8923
8924         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8925
8926         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8927         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8928         tw32(RDMAC_STATUS, 0);
8929         tw32(WDMAC_STATUS, 0);
8930
8931         tw32(BUFMGR_MODE, 0);
8932         tw32(FTQ_RESET, 0);
8933
8934         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8935         test_desc.addr_lo = buf_dma & 0xffffffff;
8936         test_desc.nic_mbuf = 0x00002100;
8937         test_desc.len = size;
8938
8939         /*
8940          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8941          * the *second* time the tg3 driver was loaded after an
8942          * initial scan.
8943          *
8944          * Broadcom tells me:
8945          *   ...the DMA engine is connected to the GRC block and a DMA
8946          *   reset may affect the GRC block in some unpredictable way...
8947          *   The behavior of resets to individual blocks has not been tested.
8948          *
8949          * Broadcom noted the GRC reset will also reset all sub-components.
8950          */
8951         if (to_device) {
8952                 test_desc.cqid_sqid = (13 << 8) | 2;
8953
8954                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8955                 udelay(40);
8956         } else {
8957                 test_desc.cqid_sqid = (16 << 8) | 7;
8958
8959                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8960                 udelay(40);
8961         }
8962         test_desc.flags = 0x00000005;
8963
8964         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8965                 u32 val;
8966
8967                 val = *(((u32 *)&test_desc) + i);
8968                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8969                                        sram_dma_descs + (i * sizeof(u32)));
8970                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8971         }
8972         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8973
8974         if (to_device) {
8975                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8976         } else {
8977                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8978         }
8979
8980         ret = -ENODEV;
8981         for (i = 0; i < 40; i++) {
8982                 u32 val;
8983
8984                 if (to_device)
8985                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8986                 else
8987                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8988                 if ((val & 0xffff) == sram_dma_descs) {
8989                         ret = 0;
8990                         break;
8991                 }
8992
8993                 udelay(100);
8994         }
8995
8996         return ret;
8997 }
8998
8999 #define TEST_BUFFER_SIZE        0x2000
9000
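/* DMA sanity test run at probe time.  A patterned 8KB buffer is DMAed
 * to the chip and back with the largest write burst allowed; on
 * 5700/5701, a miscompare on read-back clamps the write boundary to
 * 16 bytes and retries, otherwise the boundary chosen by
 * tg3_calc_dma_bndry() is restored.
 */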
9001 static int __devinit tg3_test_dma(struct tg3 *tp)
9002 {
9003         dma_addr_t buf_dma;
9004         u32 *buf, saved_dma_rwctrl;
9005         int ret;
9006
9007         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9008         if (!buf) {
9009                 ret = -ENOMEM;
9010                 goto out_nofree;
9011         }
9012
9013         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9014                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9015
9016         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9017
9018         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9019                 /* DMA read watermark not used on PCIE */
9020                 tp->dma_rwctrl |= 0x00180000;
9021         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9022                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9023                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9024                         tp->dma_rwctrl |= 0x003f0000;
9025                 else
9026                         tp->dma_rwctrl |= 0x003f000f;
9027         } else {
9028                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9029                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9030                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9031
9032                         if (ccval == 0x6 || ccval == 0x7)
9033                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9034
9035                         /* Set bit 23 to enable PCIX hw bug fix */
9036                         tp->dma_rwctrl |= 0x009f0000;
9037                 } else {
9038                         tp->dma_rwctrl |= 0x001b000f;
9039                 }
9040         }
9041
9042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9043             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9044                 tp->dma_rwctrl &= 0xfffffff0;
9045
9046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9048                 /* Remove this if it causes problems for some boards. */
9049                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9050
9051                 /* On 5700/5701 chips, we need to set this bit.
9052                  * Otherwise the chip will issue cacheline transactions
9053                  * to streamable DMA memory without all of the byte
9054                  * enables turned on.  This is an error on several
9055                  * RISC PCI controllers, in particular sparc64.
9056                  *
9057                  * On 5703/5704 chips, this bit has been reassigned
9058                  * a different meaning.  In particular, it is used
9059                  * on those chips to enable a PCI-X workaround.
9060                  */
9061                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9062         }
9063
9064         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9065
9066 #if 0
9067         /* Unneeded, already done by tg3_get_invariants.  */
9068         tg3_switch_clocks(tp);
9069 #endif
9070
9071         ret = 0;
9072         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9073             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9074                 goto out;
9075
9076         /* It is best to perform DMA test with maximum write burst size
9077          * to expose the 5700/5701 write DMA bug.
9078          */
9079         saved_dma_rwctrl = tp->dma_rwctrl;
9080         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9081         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9082
9083         while (1) {
9084                 u32 *p = buf, i;
9085
9086                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9087                         p[i] = i;
9088
9089                 /* Send the buffer to the chip. */
9090                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9091                 if (ret) {
9092                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
9093                         break;
9094                 }
9095
9096 #if 0
9097                 /* validate data reached card RAM correctly. */
9098                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9099                         u32 val;
9100                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
9101                         if (le32_to_cpu(val) != p[i]) {
9102                                 printk(KERN_ERR "tg3_test_dma() card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
9103                                 /* ret = -ENODEV here? */
9104                         }
9105                         p[i] = 0;
9106                 }
9107 #endif
9108                 /* Now read it back. */
9109                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
9110                 if (ret) {
9111                         printk(KERN_ERR "tg3_test_dma() buffer read failed, err = %d\n", ret);
9112
9113                         break;
9114                 }
9115
9116                 /* Verify it. */
9117                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9118                         if (p[i] == i)
9119                                 continue;
9120
9121                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9122                             DMA_RWCTRL_WRITE_BNDRY_16) {
9123                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9124                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9125                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9126                                 break;
9127                         } else {
9128                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
9129                                 ret = -ENODEV;
9130                                 goto out;
9131                         }
9132                 }
9133
9134                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
9135                         /* Success. */
9136                         ret = 0;
9137                         break;
9138                 }
9139         }
9140         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9141             DMA_RWCTRL_WRITE_BNDRY_16) {
9142                 /* The DMA test passed without adjusting the DMA boundary,
9143                  * so just restore the boundary calculated earlier.
9144                  */
9145                 tp->dma_rwctrl = saved_dma_rwctrl;
9146                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9147         }
9148
9149 out:
9150         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
9151 out_nofree:
9152         return ret;
9153 }
9154
9155 static void __devinit tg3_init_link_config(struct tg3 *tp)
9156 {
9157         tp->link_config.advertising =
9158                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9159                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9160                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
9161                  ADVERTISED_Autoneg | ADVERTISED_MII);
9162         tp->link_config.speed = SPEED_INVALID;
9163         tp->link_config.duplex = DUPLEX_INVALID;
9164         tp->link_config.autoneg = AUTONEG_ENABLE;
9165         netif_carrier_off(tp->dev);
9166         tp->link_config.active_speed = SPEED_INVALID;
9167         tp->link_config.active_duplex = DUPLEX_INVALID;
9168         tp->link_config.phy_is_low_power = 0;
9169         tp->link_config.orig_speed = SPEED_INVALID;
9170         tp->link_config.orig_duplex = DUPLEX_INVALID;
9171         tp->link_config.orig_autoneg = AUTONEG_INVALID;
9172 }
9173
9174 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
9175 {
9176         tp->bufmgr_config.mbuf_read_dma_low_water =
9177                 DEFAULT_MB_RDMA_LOW_WATER;
9178         tp->bufmgr_config.mbuf_mac_rx_low_water =
9179                 DEFAULT_MB_MACRX_LOW_WATER;
9180         tp->bufmgr_config.mbuf_high_water =
9181                 DEFAULT_MB_HIGH_WATER;
9182
9183         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
9184                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
9185         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
9186                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
9187         tp->bufmgr_config.mbuf_high_water_jumbo =
9188                 DEFAULT_MB_HIGH_WATER_JUMBO;
9189
9190         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
9191         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
9192 }
9193
9194 static char * __devinit tg3_phy_string(struct tg3 *tp)
9195 {
9196         switch (tp->phy_id & PHY_ID_MASK) {
9197         case PHY_ID_BCM5400:    return "5400";
9198         case PHY_ID_BCM5401:    return "5401";
9199         case PHY_ID_BCM5411:    return "5411";
9200         case PHY_ID_BCM5701:    return "5701";
9201         case PHY_ID_BCM5703:    return "5703";
9202         case PHY_ID_BCM5704:    return "5704";
9203         case PHY_ID_BCM5705:    return "5705";
9204         case PHY_ID_BCM5750:    return "5750";
9205         case PHY_ID_BCM5752:    return "5752";
9206         case PHY_ID_BCM8002:    return "8002/serdes";
9207         case 0:                 return "serdes";
9208         default:                return "unknown";
9209         }
9210 }
9211
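/* The 5704 is a dual-port chip whose two ports show up as separate PCI
 * functions in the same slot.  Locate the peer function so the two ports
 * can coordinate shared state (for example auxiliary power handling).
 */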
9212 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9213 {
9214         struct pci_dev *peer;
9215         unsigned int func, devnr = tp->pdev->devfn & ~7;
9216
9217         for (func = 0; func < 8; func++) {
9218                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9219                 if (peer && peer != tp->pdev)
9220                         break;
9221                 pci_dev_put(peer);
9222         }
9223         if (!peer || peer == tp->pdev)
9224                 BUG();
9225
9226         /*
9227          * We don't need to keep the refcount elevated; there's no way
9228          * to remove one half of this device without removing the other
9229          */
9230         pci_dev_put(peer);
9231
9232         return peer;
9233 }
9234
9235 static void __devinit tg3_init_coal(struct tg3 *tp)
9236 {
9237         struct ethtool_coalesce *ec = &tp->coal;
9238
9239         memset(ec, 0, sizeof(*ec));
9240         ec->cmd = ETHTOOL_GCOALESCE;
9241         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9242         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9243         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9244         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9245         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9246         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9247         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9248         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9249         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9250
9251         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9252                                  HOSTCC_MODE_CLRTICK_TXBD)) {
9253                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9254                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9255                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9256                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9257         }
9258 }
9259
9260 static int __devinit tg3_init_one(struct pci_dev *pdev,
9261                                   const struct pci_device_id *ent)
9262 {
9263         static int tg3_version_printed = 0;
9264         unsigned long tg3reg_base, tg3reg_len;
9265         struct net_device *dev;
9266         struct tg3 *tp;
9267         int i, err, pci_using_dac, pm_cap;
9268
9269         if (tg3_version_printed++ == 0)
9270                 printk(KERN_INFO "%s", version);
9271
9272         err = pci_enable_device(pdev);
9273         if (err) {
9274                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9275                        "aborting.\n");
9276                 return err;
9277         }
9278
9279         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9280                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9281                        "base address, aborting.\n");
9282                 err = -ENODEV;
9283                 goto err_out_disable_pdev;
9284         }
9285
9286         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9287         if (err) {
9288                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9289                        "aborting.\n");
9290                 goto err_out_disable_pdev;
9291         }
9292
9293         pci_set_master(pdev);
9294
9295         /* Find power-management capability. */
9296         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9297         if (pm_cap == 0) {
9298                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9299                        "aborting.\n");
9300                 err = -EIO;
9301                 goto err_out_free_res;
9302         }
9303
9304         /* Configure DMA attributes. */
9305         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9306         if (!err) {
9307                 pci_using_dac = 1;
9308                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9309                 if (err < 0) {
9310                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9311                                "for consistent allocations\n");
9312                         goto err_out_free_res;
9313                 }
9314         } else {
9315                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9316                 if (err) {
9317                         printk(KERN_ERR PFX "No usable DMA configuration, "
9318                                "aborting.\n");
9319                         goto err_out_free_res;
9320                 }
9321                 pci_using_dac = 0;
9322         }
9323
9324         tg3reg_base = pci_resource_start(pdev, 0);
9325         tg3reg_len = pci_resource_len(pdev, 0);
9326
9327         dev = alloc_etherdev(sizeof(*tp));
9328         if (!dev) {
9329                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9330                 err = -ENOMEM;
9331                 goto err_out_free_res;
9332         }
9333
9334         SET_MODULE_OWNER(dev);
9335         SET_NETDEV_DEV(dev, &pdev->dev);
9336
9337         if (pci_using_dac)
9338                 dev->features |= NETIF_F_HIGHDMA;
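        /* The driver takes its own tp->tx_lock around transmits, so tell the
         * core not to grab the per-device xmit lock as well.
         */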
9339         dev->features |= NETIF_F_LLTX;
9340 #if TG3_VLAN_TAG_USED
9341         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9342         dev->vlan_rx_register = tg3_vlan_rx_register;
9343         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9344 #endif
9345
9346         tp = netdev_priv(dev);
9347         tp->pdev = pdev;
9348         tp->dev = dev;
9349         tp->pm_cap = pm_cap;
9350         tp->mac_mode = TG3_DEF_MAC_MODE;
9351         tp->rx_mode = TG3_DEF_RX_MODE;
9352         tp->tx_mode = TG3_DEF_TX_MODE;
9353         tp->mi_mode = MAC_MI_MODE_BASE;
9354         if (tg3_debug > 0)
9355                 tp->msg_enable = tg3_debug;
9356         else
9357                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
9358
9359         /* The word/byte swap controls here affect register access byte
9360          * swapping only.  DMA data byte swapping is controlled by the
9361          * GRC_MODE setting below.
9362          */
9363         tp->misc_host_ctrl =
9364                 MISC_HOST_CTRL_MASK_PCI_INT |
9365                 MISC_HOST_CTRL_WORD_SWAP |
9366                 MISC_HOST_CTRL_INDIR_ACCESS |
9367                 MISC_HOST_CTRL_PCISTATE_RW;
9368
9369         /* The NONFRM (non-frame) byte/word swap controls take effect
9370          * on descriptor entries, i.e. anything which isn't packet data.
9371          *
9372          * The StrongARM chips on the board (one for tx, one for rx)
9373          * are running in big-endian mode.
9374          */
9375         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
9376                         GRC_MODE_WSWAP_NONFRM_DATA);
9377 #ifdef __BIG_ENDIAN
9378         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
9379 #endif
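        /* tp->lock is the main driver lock, tp->tx_lock covers the TX path,
         * and tp->indirect_lock serializes indirect (config-space mediated)
         * register accesses.
         */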
9380         spin_lock_init(&tp->lock);
9381         spin_lock_init(&tp->tx_lock);
9382         spin_lock_init(&tp->indirect_lock);
9383         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
9384
9385         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
9386         if (tp->regs == 0UL) {
9387                 printk(KERN_ERR PFX "Cannot map device registers, "
9388                        "aborting.\n");
9389                 err = -ENOMEM;
9390                 goto err_out_free_dev;
9391         }
9392
9393         tg3_init_link_config(tp);
9394
9395         tg3_init_bufmgr_config(tp);
9396
9397         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
9398         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
9399         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
9400
9401         dev->open = tg3_open;
9402         dev->stop = tg3_close;
9403         dev->get_stats = tg3_get_stats;
9404         dev->set_multicast_list = tg3_set_rx_mode;
9405         dev->set_mac_address = tg3_set_mac_addr;
9406         dev->do_ioctl = tg3_ioctl;
9407         dev->tx_timeout = tg3_tx_timeout;
9408         dev->poll = tg3_poll;
9409         dev->ethtool_ops = &tg3_ethtool_ops;
9410         dev->weight = 64;
9411         dev->watchdog_timeo = TG3_TX_TIMEOUT;
9412         dev->change_mtu = tg3_change_mtu;
9413         dev->irq = pdev->irq;
9414 #ifdef CONFIG_NET_POLL_CONTROLLER
9415         dev->poll_controller = tg3_poll_controller;
9416 #endif
9417
9418         err = tg3_get_invariants(tp);
9419         if (err) {
9420                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
9421                        "aborting.\n");
9422                 goto err_out_iounmap;
9423         }
9424
9425         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9426                 tp->bufmgr_config.mbuf_read_dma_low_water =
9427                         DEFAULT_MB_RDMA_LOW_WATER_5705;
9428                 tp->bufmgr_config.mbuf_mac_rx_low_water =
9429                         DEFAULT_MB_MACRX_LOW_WATER_5705;
9430                 tp->bufmgr_config.mbuf_high_water =
9431                         DEFAULT_MB_HIGH_WATER_5705;
9432         }
9433
9434 #if TG3_TSO_SUPPORT != 0
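        /* Chips with a hardware TSO engine can always offload TSO.  Older
         * parts rely on TSO firmware, which is not usable on 5700/5701,
         * on 5705 A0, or when ASF is enabled (presumably because the ASF
         * firmware occupies the same on-chip CPU and memory).
         */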
9435         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
9436                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9437         }
9438         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9439             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9440             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
9441             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
9442                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9443         } else {
9444                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9445         }
9446
9447         /* TSO is off by default; the user can enable it via ethtool. */
9448 #if 0
9449         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
9450                 dev->features |= NETIF_F_TSO;
9451 #endif
9452
9453 #endif
9454
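        /* Apparently a workaround for a 5705 A1 limitation: on slower buses,
         * without TSO, cap the standard RX ring at 64 pending buffers.
         */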
9455         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
9456             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
9457             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
9458                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
9459                 tp->rx_pending = 63;
9460         }
9461
9462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9463                 tp->pdev_peer = tg3_find_5704_peer(tp);
9464
9465         err = tg3_get_device_address(tp);
9466         if (err) {
9467                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
9468                        "aborting.\n");
9469                 goto err_out_iounmap;
9470         }
9471
9472         /*
9473          * Reset the chip in case an UNDI or EFI driver did not shut it
9474          * down cleanly.  The DMA self test will enable WDMAC, and we'll
9475          * see (spurious) pending DMA on the PCI bus at that point.
9476          */
9477         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
9478             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9479                 pci_save_state(tp->pdev);
9480                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9481                 tg3_halt(tp, 1);
9482         }
9483
9484         err = tg3_test_dma(tp);
9485         if (err) {
9486                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
9487                 goto err_out_iounmap;
9488         }
9489
9490         /* Tigon3 can do IPv4 checksumming only... and some chips have
9491          * buggy checksumming.
9492          */
9493         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
9494                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
9495                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9496         } else
9497                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9498
9499         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
9500                 dev->features &= ~NETIF_F_HIGHDMA;
9501
9502         /* flow control autonegotiation is default behavior */
9503         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9504
9505         tg3_init_coal(tp);
9506
9507         err = register_netdev(dev);
9508         if (err) {
9509                 printk(KERN_ERR PFX "Cannot register net device, "
9510                        "aborting.\n");
9511                 goto err_out_iounmap;
9512         }
9513
9514         pci_set_drvdata(pdev, dev);
9515
9516         /* Now that we have fully setup the chip, save away a snapshot
9517          * of the PCI config space.  We need to restore this after
9518          * GRC_MISC_CFG core clock resets and some resume events.
9519          */
9520         pci_save_state(tp->pdev);
9521
9522         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
9523                dev->name,
9524                tp->board_part_number,
9525                tp->pci_chip_rev_id,
9526                tg3_phy_string(tp),
9527                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
9528                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
9529                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
9530                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
9531                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
9532                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
9533
9534         for (i = 0; i < 6; i++)
9535                 printk("%2.2x%c", dev->dev_addr[i],
9536                        i == 5 ? '\n' : ':');
9537
9538         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
9539                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
9540                "TSOcap[%d]\n",
9541                dev->name,
9542                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
9543                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
9544                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
9545                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
9546                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9547                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9548                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9549         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
9550                dev->name, tp->dma_rwctrl);
9551
9552         return 0;
9553
9554 err_out_iounmap:
9555         iounmap(tp->regs);
9556
9557 err_out_free_dev:
9558         free_netdev(dev);
9559
9560 err_out_free_res:
9561         pci_release_regions(pdev);
9562
9563 err_out_disable_pdev:
9564         pci_disable_device(pdev);
9565         pci_set_drvdata(pdev, NULL);
9566         return err;
9567 }
9568
9569 static void __devexit tg3_remove_one(struct pci_dev *pdev)
9570 {
9571         struct net_device *dev = pci_get_drvdata(pdev);
9572
9573         if (dev) {
9574                 struct tg3 *tp = netdev_priv(dev);
9575
9576                 unregister_netdev(dev);
9577                 iounmap(tp->regs);
9578                 free_netdev(dev);
9579                 pci_release_regions(pdev);
9580                 pci_disable_device(pdev);
9581                 pci_set_drvdata(pdev, NULL);
9582         }
9583 }
9584
9585 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9586 {
9587         struct net_device *dev = pci_get_drvdata(pdev);
9588         struct tg3 *tp = netdev_priv(dev);
9589         int err;
9590
9591         if (!netif_running(dev))
9592                 return 0;
9593
9594         tg3_netif_stop(tp);
9595
9596         del_timer_sync(&tp->timer);
9597
9598         spin_lock_irq(&tp->lock);
9599         spin_lock(&tp->tx_lock);
9600         tg3_disable_ints(tp);
9601         spin_unlock(&tp->tx_lock);
9602         spin_unlock_irq(&tp->lock);
9603
9604         netif_device_detach(dev);
9605
9606         spin_lock_irq(&tp->lock);
9607         spin_lock(&tp->tx_lock);
9608         tg3_halt(tp, 1);
9609         spin_unlock(&tp->tx_lock);
9610         spin_unlock_irq(&tp->lock);
9611
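        /* Put the chip into the requested low-power state.  If that fails,
         * bring the hardware back up and reattach the interface so the
         * device is left usable rather than half suspended.
         */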
9612         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9613         if (err) {
9614                 spin_lock_irq(&tp->lock);
9615                 spin_lock(&tp->tx_lock);
9616
9617                 tg3_init_hw(tp);
9618
9619                 tp->timer.expires = jiffies + tp->timer_offset;
9620                 add_timer(&tp->timer);
9621
9622                 netif_device_attach(dev);
9623                 tg3_netif_start(tp);
9624
9625                 spin_unlock(&tp->tx_lock);
9626                 spin_unlock_irq(&tp->lock);
9627         }
9628
9629         return err;
9630 }
9631
9632 static int tg3_resume(struct pci_dev *pdev)
9633 {
9634         struct net_device *dev = pci_get_drvdata(pdev);
9635         struct tg3 *tp = netdev_priv(dev);
9636         int err;
9637
9638         if (!netif_running(dev))
9639                 return 0;
9640
9641         pci_restore_state(tp->pdev);
9642
9643         err = tg3_set_power_state(tp, 0);
9644         if (err)
9645                 return err;
9646
9647         netif_device_attach(dev);
9648
9649         spin_lock_irq(&tp->lock);
9650         spin_lock(&tp->tx_lock);
9651
9652         tg3_init_hw(tp);
9653
9654         tp->timer.expires = jiffies + tp->timer_offset;
9655         add_timer(&tp->timer);
9656
9657         tg3_enable_ints(tp);
9658
9659         tg3_netif_start(tp);
9660
9661         spin_unlock(&tp->tx_lock);
9662         spin_unlock_irq(&tp->lock);
9663
9664         return 0;
9665 }
9666
9667 static struct pci_driver tg3_driver = {
9668         .name           = DRV_MODULE_NAME,
9669         .id_table       = tg3_pci_tbl,
9670         .probe          = tg3_init_one,
9671         .remove         = __devexit_p(tg3_remove_one),
9672         .suspend        = tg3_suspend,
9673         .resume         = tg3_resume
9674 };
9675
9676 static int __init tg3_init(void)
9677 {
9678         return pci_module_init(&tg3_driver);
9679 }
9680
9681 static void __exit tg3_cleanup(void)
9682 {
9683         pci_unregister_driver(&tg3_driver);
9684 }
9685
9686 module_init(tg3_init);
9687 module_exit(tg3_cleanup);