1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.44"
72 #define DRV_MODULE_RELDATE      "Dec 6, 2005"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         ((TP)->tx_pending -                                             \
128          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
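/* Example (illustrative only, not used by the driver): because
 * TG3_TX_RING_SIZE is a power of two (512), an index update written as
 *
 *      next = (cur + 1) % TG3_TX_RING_SIZE;
 *
 * is equivalent to the cheaper mask form used by NEXT_TX() above:
 *
 *      next = (cur + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * Keeping the ring sizes as compile-time constants (see the comment
 * above TG3_RX_RCB_RING_SIZE) lets GCC perform this strength reduction
 * automatically.
 */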
130
131 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
132 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
133
134 /* minimum number of free TX descriptors required to wake up TX process */
135 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
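/* Example usage (assuming the conventional NETIF_MSG_* bit values,
 * e.g. NETIF_MSG_DRV = 0x0001, NETIF_MSG_PROBE = 0x0002,
 * NETIF_MSG_LINK = 0x0004):
 *
 *      modprobe tg3 tg3_debug=0x0007
 *
 * enables only driver, probe and link messages, while the default of
 * -1 selects the TG3_DEF_MSG_ENABLE mask defined above.
 */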
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
234           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
236           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
242           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
244           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
246           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247         { 0, }
248 };
249
250 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
251
252 static struct {
253         const char string[ETH_GSTRING_LEN];
254 } ethtool_stats_keys[TG3_NUM_STATS] = {
255         { "rx_octets" },
256         { "rx_fragments" },
257         { "rx_ucast_packets" },
258         { "rx_mcast_packets" },
259         { "rx_bcast_packets" },
260         { "rx_fcs_errors" },
261         { "rx_align_errors" },
262         { "rx_xon_pause_rcvd" },
263         { "rx_xoff_pause_rcvd" },
264         { "rx_mac_ctrl_rcvd" },
265         { "rx_xoff_entered" },
266         { "rx_frame_too_long_errors" },
267         { "rx_jabbers" },
268         { "rx_undersize_packets" },
269         { "rx_in_length_errors" },
270         { "rx_out_length_errors" },
271         { "rx_64_or_less_octet_packets" },
272         { "rx_65_to_127_octet_packets" },
273         { "rx_128_to_255_octet_packets" },
274         { "rx_256_to_511_octet_packets" },
275         { "rx_512_to_1023_octet_packets" },
276         { "rx_1024_to_1522_octet_packets" },
277         { "rx_1523_to_2047_octet_packets" },
278         { "rx_2048_to_4095_octet_packets" },
279         { "rx_4096_to_8191_octet_packets" },
280         { "rx_8192_to_9022_octet_packets" },
281
282         { "tx_octets" },
283         { "tx_collisions" },
284
285         { "tx_xon_sent" },
286         { "tx_xoff_sent" },
287         { "tx_flow_control" },
288         { "tx_mac_errors" },
289         { "tx_single_collisions" },
290         { "tx_mult_collisions" },
291         { "tx_deferred" },
292         { "tx_excessive_collisions" },
293         { "tx_late_collisions" },
294         { "tx_collide_2times" },
295         { "tx_collide_3times" },
296         { "tx_collide_4times" },
297         { "tx_collide_5times" },
298         { "tx_collide_6times" },
299         { "tx_collide_7times" },
300         { "tx_collide_8times" },
301         { "tx_collide_9times" },
302         { "tx_collide_10times" },
303         { "tx_collide_11times" },
304         { "tx_collide_12times" },
305         { "tx_collide_13times" },
306         { "tx_collide_14times" },
307         { "tx_collide_15times" },
308         { "tx_ucast_packets" },
309         { "tx_mcast_packets" },
310         { "tx_bcast_packets" },
311         { "tx_carrier_sense_errors" },
312         { "tx_discards" },
313         { "tx_errors" },
314
315         { "dma_writeq_full" },
316         { "dma_write_prioq_full" },
317         { "rxbds_empty" },
318         { "rx_discards" },
319         { "rx_errors" },
320         { "rx_threshold_hit" },
321
322         { "dma_readq_full" },
323         { "dma_read_prioq_full" },
324         { "tx_comp_queue_full" },
325
326         { "ring_set_send_prod_index" },
327         { "ring_status_update" },
328         { "nic_irqs" },
329         { "nic_avoided_irqs" },
330         { "nic_tx_threshold_hit" }
331 };
332
333 static struct {
334         const char string[ETH_GSTRING_LEN];
335 } ethtool_test_keys[TG3_NUM_TEST] = {
336         { "nvram test     (online) " },
337         { "link test      (online) " },
338         { "register test  (offline)" },
339         { "memory test    (offline)" },
340         { "loopback test  (offline)" },
341         { "interrupt test (offline)" },
342 };
343
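/* Indirect register access: the register offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the value is then
 * transferred through TG3PCI_REG_DATA.  tp->indirect_lock serializes the
 * address/data pair so concurrent accesses cannot interleave.
 */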
344 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
345 {
346         unsigned long flags;
347
348         spin_lock_irqsave(&tp->indirect_lock, flags);
349         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
350         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
351         spin_unlock_irqrestore(&tp->indirect_lock, flags);
352 }
353
354 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
355 {
356         writel(val, tp->regs + off);
357         readl(tp->regs + off);
358 }
359
360 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
361 {
362         unsigned long flags;
363         u32 val;
364
365         spin_lock_irqsave(&tp->indirect_lock, flags);
366         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
367         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
368         spin_unlock_irqrestore(&tp->indirect_lock, flags);
369         return val;
370 }
371
372 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
373 {
374         unsigned long flags;
375
376         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
377                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
378                                        TG3_64BIT_REG_LOW, val);
379                 return;
380         }
381         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
382                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
383                                        TG3_64BIT_REG_LOW, val);
384                 return;
385         }
386
387         spin_lock_irqsave(&tp->indirect_lock, flags);
388         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
390         spin_unlock_irqrestore(&tp->indirect_lock, flags);
391
392         /* In indirect mode when disabling interrupts, we also need
393          * to clear the interrupt bit in the GRC local ctrl register.
394          */
395         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
396             (val == 0x1)) {
397                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
398                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
399         }
400 }
401
402 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
403 {
404         unsigned long flags;
405         u32 val;
406
407         spin_lock_irqsave(&tp->indirect_lock, flags);
408         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
409         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
410         spin_unlock_irqrestore(&tp->indirect_lock, flags);
411         return val;
412 }
413
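/* Write a register and flush the posted write by reading it back,
 * unless one of the hardware bug flags below indicates that the
 * read-back itself is unsafe on this chip.
 */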
414 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
415 {
416         tp->write32(tp, off, val);
417         if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
418             !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
419             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
420                 tp->read32(tp, off);    /* flush */
421 }
422
423 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
424 {
425         tp->write32_mbox(tp, off, val);
426         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
427             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
428                 tp->read32_mbox(tp, off);
429 }
430
431 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
432 {
433         void __iomem *mbox = tp->regs + off;
434         writel(val, mbox);
435         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
436                 writel(val, mbox);
437         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
438                 readl(mbox);
439 }
440
441 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
442 {
443         writel(val, tp->regs + off);
444 }
445
446 static u32 tg3_read32(struct tg3 *tp, u32 off)
447 {
448         return readl(tp->regs + off);
449 }
450
451 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
452 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
453 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
454 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
455 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
456
457 #define tw32(reg,val)           tp->write32(tp, reg, val)
458 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
459 #define tr32(reg)               tp->read32(tp, reg)
460
461 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
462 {
463         unsigned long flags;
464
465         spin_lock_irqsave(&tp->indirect_lock, flags);
466         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
467         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
468
469         /* Always leave this as zero. */
470         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
471         spin_unlock_irqrestore(&tp->indirect_lock, flags);
472 }
473
474 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
475 {
476         /* If no workaround is needed, write to mem space directly */
477         if (tp->write32 != tg3_write_indirect_reg32)
478                 tw32(NIC_SRAM_WIN_BASE + off, val);
479         else
480                 tg3_write_mem(tp, off, val);
481 }
482
483 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
484 {
485         unsigned long flags;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
489         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
490
491         /* Always leave this as zero. */
492         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
493         spin_unlock_irqrestore(&tp->indirect_lock, flags);
494 }
495
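/* Interrupts are masked by setting MISC_HOST_CTRL_MASK_PCI_INT and
 * writing 1 to interrupt mailbox 0.  tg3_enable_ints() reverses both
 * steps and writes the last status tag (tag << 24) back to the mailbox
 * so the chip knows which status block updates have been serviced.
 */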
496 static void tg3_disable_ints(struct tg3 *tp)
497 {
498         tw32(TG3PCI_MISC_HOST_CTRL,
499              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
500         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
501 }
502
503 static inline void tg3_cond_int(struct tg3 *tp)
504 {
505         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
506             (tp->hw_status->status & SD_STATUS_UPDATED))
507                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
508 }
509
510 static void tg3_enable_ints(struct tg3 *tp)
511 {
512         tp->irq_sync = 0;
513         wmb();
514
515         tw32(TG3PCI_MISC_HOST_CTRL,
516              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
517         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
518                        (tp->last_tag << 24));
519         tg3_cond_int(tp);
520 }
521
522 static inline unsigned int tg3_has_work(struct tg3 *tp)
523 {
524         struct tg3_hw_status *sblk = tp->hw_status;
525         unsigned int work_exists = 0;
526
527         /* check for phy events */
528         if (!(tp->tg3_flags &
529               (TG3_FLAG_USE_LINKCHG_REG |
530                TG3_FLAG_POLL_SERDES))) {
531                 if (sblk->status & SD_STATUS_LINK_CHG)
532                         work_exists = 1;
533         }
534         /* check for RX/TX work to do */
535         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
536             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
537                 work_exists = 1;
538
539         return work_exists;
540 }
541
542 /* tg3_restart_ints
543  *  similar to tg3_enable_ints, but it accurately determines whether there
544  *  is new work pending and can return without flushing the PIO write
545  *  which reenables interrupts.
546  */
547 static void tg3_restart_ints(struct tg3 *tp)
548 {
549         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
550                      tp->last_tag << 24);
551         mmiowb();
552
553         /* When doing tagged status, this work check is unnecessary.
554          * The last_tag we write above tells the chip which piece of
555          * work we've completed.
556          */
557         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
558             tg3_has_work(tp))
559                 tw32(HOSTCC_MODE, tp->coalesce_mode |
560                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
561 }
562
563 static inline void tg3_netif_stop(struct tg3 *tp)
564 {
565         tp->dev->trans_start = jiffies; /* prevent tx timeout */
566         netif_poll_disable(tp->dev);
567         netif_tx_disable(tp->dev);
568 }
569
570 static inline void tg3_netif_start(struct tg3 *tp)
571 {
572         netif_wake_queue(tp->dev);
573         /* NOTE: unconditional netif_wake_queue is only appropriate
574          * so long as all callers are assured to have free tx slots
575          * (such as after tg3_init_hw)
576          */
577         netif_poll_enable(tp->dev);
578         tp->hw_status->status |= SD_STATUS_UPDATED;
579         tg3_enable_ints(tp);
580 }
581
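/* Reprogram TG3PCI_CLOCK_CTRL, stepping through the ALTCLK/core-clock
 * bits with short delays in between; 5780-class chips manage their own
 * clocks and are left untouched.
 */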
582 static void tg3_switch_clocks(struct tg3 *tp)
583 {
584         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
585         u32 orig_clock_ctrl;
586
587         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
588                 return;
589
590         orig_clock_ctrl = clock_ctrl;
591         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
592                        CLOCK_CTRL_CLKRUN_OENABLE |
593                        0x1f);
594         tp->pci_clock_ctrl = clock_ctrl;
595
596         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
597                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
598                         tw32_f(TG3PCI_CLOCK_CTRL,
599                                clock_ctrl | CLOCK_CTRL_625_CORE);
600                         udelay(40);
601                 }
602         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
603                 tw32_f(TG3PCI_CLOCK_CTRL,
604                      clock_ctrl |
605                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
606                 udelay(40);
607                 tw32_f(TG3PCI_CLOCK_CTRL,
608                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
609                 udelay(40);
610         }
611         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
612         udelay(40);
613 }
614
615 #define PHY_BUSY_LOOPS  5000
616
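/* MII/PHY register access goes through MAC_MI_COM: a frame is built
 * from the PHY address, the register number and a read or write
 * command, and MI_COM_BUSY is then polled (up to PHY_BUSY_LOOPS
 * iterations) until the transaction completes.  Autopolling is
 * temporarily disabled around the access so it cannot collide with
 * these frames.
 */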
617 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
618 {
619         u32 frame_val;
620         unsigned int loops;
621         int ret;
622
623         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
624                 tw32_f(MAC_MI_MODE,
625                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
626                 udelay(80);
627         }
628
629         *val = 0x0;
630
631         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
632                       MI_COM_PHY_ADDR_MASK);
633         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
634                       MI_COM_REG_ADDR_MASK);
635         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
636         
637         tw32_f(MAC_MI_COM, frame_val);
638
639         loops = PHY_BUSY_LOOPS;
640         while (loops != 0) {
641                 udelay(10);
642                 frame_val = tr32(MAC_MI_COM);
643
644                 if ((frame_val & MI_COM_BUSY) == 0) {
645                         udelay(5);
646                         frame_val = tr32(MAC_MI_COM);
647                         break;
648                 }
649                 loops -= 1;
650         }
651
652         ret = -EBUSY;
653         if (loops != 0) {
654                 *val = frame_val & MI_COM_DATA_MASK;
655                 ret = 0;
656         }
657
658         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
659                 tw32_f(MAC_MI_MODE, tp->mi_mode);
660                 udelay(80);
661         }
662
663         return ret;
664 }
665
666 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
667 {
668         u32 frame_val;
669         unsigned int loops;
670         int ret;
671
672         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
673                 tw32_f(MAC_MI_MODE,
674                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
675                 udelay(80);
676         }
677
678         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
679                       MI_COM_PHY_ADDR_MASK);
680         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
681                       MI_COM_REG_ADDR_MASK);
682         frame_val |= (val & MI_COM_DATA_MASK);
683         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
684         
685         tw32_f(MAC_MI_COM, frame_val);
686
687         loops = PHY_BUSY_LOOPS;
688         while (loops != 0) {
689                 udelay(10);
690                 frame_val = tr32(MAC_MI_COM);
691                 if ((frame_val & MI_COM_BUSY) == 0) {
692                         udelay(5);
693                         frame_val = tr32(MAC_MI_COM);
694                         break;
695                 }
696                 loops -= 1;
697         }
698
699         ret = -EBUSY;
700         if (loops != 0)
701                 ret = 0;
702
703         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
704                 tw32_f(MAC_MI_MODE, tp->mi_mode);
705                 udelay(80);
706         }
707
708         return ret;
709 }
710
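/* Enable the PHY's Ethernet@WireSpeed (automatic speed fallback)
 * feature by setting bits 15 and 4 of the AUX_CTRL shadow register
 * selected by 0x7007, unless the chip is flagged with
 * TG3_FLG2_NO_ETH_WIRE_SPEED.
 */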
711 static void tg3_phy_set_wirespeed(struct tg3 *tp)
712 {
713         u32 val;
714
715         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
716                 return;
717
718         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
719             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
720                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
721                              (val | (1 << 15) | (1 << 4)));
722 }
723
724 static int tg3_bmcr_reset(struct tg3 *tp)
725 {
726         u32 phy_control;
727         int limit, err;
728
729         /* OK, reset it, and poll the BMCR_RESET bit until it
730          * clears or we time out.
731          */
732         phy_control = BMCR_RESET;
733         err = tg3_writephy(tp, MII_BMCR, phy_control);
734         if (err != 0)
735                 return -EBUSY;
736
737         limit = 5000;
738         while (limit--) {
739                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
740                 if (err != 0)
741                         return -EBUSY;
742
743                 if ((phy_control & BMCR_RESET) == 0) {
744                         udelay(40);
745                         break;
746                 }
747                 udelay(10);
748         }
749         if (limit <= 0)
750                 return -EBUSY;
751
752         return 0;
753 }
754
755 static int tg3_wait_macro_done(struct tg3 *tp)
756 {
757         int limit = 100;
758
759         while (limit--) {
760                 u32 tmp32;
761
762                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
763                         if ((tmp32 & 0x1000) == 0)
764                                 break;
765                 }
766         }
767         if (limit <= 0)
768                 return -EBUSY;
769
770         return 0;
771 }
772
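/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back.  On a mismatch or a macro timeout, *resetp is set so the
 * caller can retry after another PHY reset.
 */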
773 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
774 {
775         static const u32 test_pat[4][6] = {
776         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
777         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
778         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
779         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
780         };
781         int chan;
782
783         for (chan = 0; chan < 4; chan++) {
784                 int i;
785
786                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
787                              (chan * 0x2000) | 0x0200);
788                 tg3_writephy(tp, 0x16, 0x0002);
789
790                 for (i = 0; i < 6; i++)
791                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
792                                      test_pat[chan][i]);
793
794                 tg3_writephy(tp, 0x16, 0x0202);
795                 if (tg3_wait_macro_done(tp)) {
796                         *resetp = 1;
797                         return -EBUSY;
798                 }
799
800                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
801                              (chan * 0x2000) | 0x0200);
802                 tg3_writephy(tp, 0x16, 0x0082);
803                 if (tg3_wait_macro_done(tp)) {
804                         *resetp = 1;
805                         return -EBUSY;
806                 }
807
808                 tg3_writephy(tp, 0x16, 0x0802);
809                 if (tg3_wait_macro_done(tp)) {
810                         *resetp = 1;
811                         return -EBUSY;
812                 }
813
814                 for (i = 0; i < 6; i += 2) {
815                         u32 low, high;
816
817                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
818                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
819                             tg3_wait_macro_done(tp)) {
820                                 *resetp = 1;
821                                 return -EBUSY;
822                         }
823                         low &= 0x7fff;
824                         high &= 0x000f;
825                         if (low != test_pat[chan][i] ||
826                             high != test_pat[chan][i+1]) {
827                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
828                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
829                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
830
831                                 return -EBUSY;
832                         }
833                 }
834         }
835
836         return 0;
837 }
838
839 static int tg3_phy_reset_chanpat(struct tg3 *tp)
840 {
841         int chan;
842
843         for (chan = 0; chan < 4; chan++) {
844                 int i;
845
846                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
847                              (chan * 0x2000) | 0x0200);
848                 tg3_writephy(tp, 0x16, 0x0002);
849                 for (i = 0; i < 6; i++)
850                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
851                 tg3_writephy(tp, 0x16, 0x0202);
852                 if (tg3_wait_macro_done(tp))
853                         return -EBUSY;
854         }
855
856         return 0;
857 }
858
859 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
860 {
861         u32 reg32, phy9_orig;
862         int retries, do_phy_reset, err;
863
864         retries = 10;
865         do_phy_reset = 1;
866         do {
867                 if (do_phy_reset) {
868                         err = tg3_bmcr_reset(tp);
869                         if (err)
870                                 return err;
871                         do_phy_reset = 0;
872                 }
873
874                 /* Disable transmitter and interrupt.  */
875                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
876                         continue;
877
878                 reg32 |= 0x3000;
879                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
880
881                 /* Set full-duplex, 1000 Mbps.  */
882                 tg3_writephy(tp, MII_BMCR,
883                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
884
885                 /* Set to master mode.  */
886                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
887                         continue;
888
889                 tg3_writephy(tp, MII_TG3_CTRL,
890                              (MII_TG3_CTRL_AS_MASTER |
891                               MII_TG3_CTRL_ENABLE_AS_MASTER));
892
893                 /* Enable SM_DSP_CLOCK and 6dB.  */
894                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
895
896                 /* Block the PHY control access.  */
897                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
898                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
899
900                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
901                 if (!err)
902                         break;
903         } while (--retries);
904
905         err = tg3_phy_reset_chanpat(tp);
906         if (err)
907                 return err;
908
909         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
910         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
911
912         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
913         tg3_writephy(tp, 0x16, 0x0000);
914
915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
917                 /* Set Extended packet length bit for jumbo frames */
918                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
919         }
920         else {
921                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
922         }
923
924         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
925
926         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
927                 reg32 &= ~0x3000;
928                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
929         } else if (!err)
930                 err = -EBUSY;
931
932         return err;
933 }
934
935 /* Reset the tigon3 PHY and then reapply any chip-specific PHY
936  * workarounds needed after a reset.
937  */
938 static int tg3_phy_reset(struct tg3 *tp)
939 {
940         u32 phy_status;
941         int err;
942
943         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
944         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
945         if (err != 0)
946                 return -EBUSY;
947
948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
951                 err = tg3_phy_reset_5703_4_5(tp);
952                 if (err)
953                         return err;
954                 goto out;
955         }
956
957         err = tg3_bmcr_reset(tp);
958         if (err)
959                 return err;
960
961 out:
962         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
964                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
965                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
966                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
967                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
968                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
969         }
970         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
971                 tg3_writephy(tp, 0x1c, 0x8d68);
972                 tg3_writephy(tp, 0x1c, 0x8d68);
973         }
974         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
975                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
976                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
977                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
978                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
979                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
982                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
983         }
984         /* Set Extended packet length bit (bit 14) on all chips that
985          * support jumbo frames. */
986         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
987                 /* Cannot do read-modify-write on 5401 */
988                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
989         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
990                 u32 phy_reg;
991
992                 /* Set bit 14 with read-modify-write to preserve other bits */
993                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
994                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
995                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
996         }
997
998         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
999          * jumbo frames transmission.
1000          */
1001         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1002                 u32 phy_reg;
1003
1004                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1005                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1006                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1007         }
1008
1009         tg3_phy_set_wirespeed(tp);
1010         return 0;
1011 }
1012
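/* Configure the GRC GPIOs that switch the board's auxiliary (Vaux)
 * power.  On 5704 both ports share this logic, so the peer device's
 * WOL and init state is consulted before the GPIOs are changed.
 */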
1013 static void tg3_frob_aux_power(struct tg3 *tp)
1014 {
1015         struct tg3 *tp_peer = tp;
1016
1017         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1018                 return;
1019
1020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1021                 tp_peer = pci_get_drvdata(tp->pdev_peer);
1022                 if (!tp_peer)
1023                         BUG();
1024         }
1025
1026
1027         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1028             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
1029                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1030                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1031                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1032                              (GRC_LCLCTRL_GPIO_OE0 |
1033                               GRC_LCLCTRL_GPIO_OE1 |
1034                               GRC_LCLCTRL_GPIO_OE2 |
1035                               GRC_LCLCTRL_GPIO_OUTPUT0 |
1036                               GRC_LCLCTRL_GPIO_OUTPUT1));
1037                         udelay(100);
1038                 } else {
1039                         u32 no_gpio2;
1040                         u32 grc_local_ctrl;
1041
1042                         if (tp_peer != tp &&
1043                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1044                                 return;
1045
1046                         /* On 5753 and variants, GPIO2 cannot be used. */
1047                         no_gpio2 = tp->nic_sram_data_cfg &
1048                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1049
1050                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1051                                          GRC_LCLCTRL_GPIO_OE1 |
1052                                          GRC_LCLCTRL_GPIO_OE2 |
1053                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1054                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1055                         if (no_gpio2) {
1056                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1057                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1058                         }
1059                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1060                                                 grc_local_ctrl);
1061                         udelay(100);
1062
1063                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1064
1065                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1066                                                 grc_local_ctrl);
1067                         udelay(100);
1068
1069                         if (!no_gpio2) {
1070                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1071                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072                                        grc_local_ctrl);
1073                                 udelay(100);
1074                         }
1075                 }
1076         } else {
1077                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1078                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1079                         if (tp_peer != tp &&
1080                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1081                                 return;
1082
1083                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1084                              (GRC_LCLCTRL_GPIO_OE1 |
1085                               GRC_LCLCTRL_GPIO_OUTPUT1));
1086                         udelay(100);
1087
1088                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089                              (GRC_LCLCTRL_GPIO_OE1));
1090                         udelay(100);
1091
1092                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1093                              (GRC_LCLCTRL_GPIO_OE1 |
1094                               GRC_LCLCTRL_GPIO_OUTPUT1));
1095                         udelay(100);
1096                 }
1097         }
1098 }
1099
1100 static int tg3_setup_phy(struct tg3 *, int);
1101
1102 #define RESET_KIND_SHUTDOWN     0
1103 #define RESET_KIND_INIT         1
1104 #define RESET_KIND_SUSPEND      2
1105
1106 static void tg3_write_sig_post_reset(struct tg3 *, int);
1107 static int tg3_halt_cpu(struct tg3 *, u32);
1108
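/* Transition the chip between PCI power states (state 0-3 corresponds
 * to D0-D3).  When leaving full power this masks interrupts, forces a
 * copper PHY down to 10 Mb/s half duplex, sets up WOL and clock bits as
 * needed, and finally writes the new state to the PCI PM control
 * register.
 */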
1109 static int tg3_set_power_state(struct tg3 *tp, int state)
1110 {
1111         u32 misc_host_ctrl;
1112         u16 power_control, power_caps;
1113         int pm = tp->pm_cap;
1114
1115         /* Make sure register accesses (indirect or otherwise)
1116          * will function correctly.
1117          */
1118         pci_write_config_dword(tp->pdev,
1119                                TG3PCI_MISC_HOST_CTRL,
1120                                tp->misc_host_ctrl);
1121
1122         pci_read_config_word(tp->pdev,
1123                              pm + PCI_PM_CTRL,
1124                              &power_control);
1125         power_control |= PCI_PM_CTRL_PME_STATUS;
1126         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1127         switch (state) {
1128         case 0:
1129                 power_control |= 0;
1130                 pci_write_config_word(tp->pdev,
1131                                       pm + PCI_PM_CTRL,
1132                                       power_control);
1133                 udelay(100);    /* Delay after power state change */
1134
1135                 /* Switch out of Vaux if it is not a LOM */
1136                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1137                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1138                         udelay(100);
1139                 }
1140
1141                 return 0;
1142
1143         case 1:
1144                 power_control |= 1;
1145                 break;
1146
1147         case 2:
1148                 power_control |= 2;
1149                 break;
1150
1151         case 3:
1152                 power_control |= 3;
1153                 break;
1154
1155         default:
1156                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1157                        "requested.\n",
1158                        tp->dev->name, state);
1159                 return -EINVAL;
1160         };
1161
1162         power_control |= PCI_PM_CTRL_PME_ENABLE;
1163
1164         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1165         tw32(TG3PCI_MISC_HOST_CTRL,
1166              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1167
1168         if (tp->link_config.phy_is_low_power == 0) {
1169                 tp->link_config.phy_is_low_power = 1;
1170                 tp->link_config.orig_speed = tp->link_config.speed;
1171                 tp->link_config.orig_duplex = tp->link_config.duplex;
1172                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1173         }
1174
1175         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1176                 tp->link_config.speed = SPEED_10;
1177                 tp->link_config.duplex = DUPLEX_HALF;
1178                 tp->link_config.autoneg = AUTONEG_ENABLE;
1179                 tg3_setup_phy(tp, 0);
1180         }
1181
1182         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1183
1184         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1185                 u32 mac_mode;
1186
1187                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1188                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1189                         udelay(40);
1190
1191                         mac_mode = MAC_MODE_PORT_MODE_MII;
1192
1193                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1194                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1195                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1196                 } else {
1197                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1198                 }
1199
1200                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1201                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1202
1203                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1204                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1205                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1206
1207                 tw32_f(MAC_MODE, mac_mode);
1208                 udelay(100);
1209
1210                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1211                 udelay(10);
1212         }
1213
1214         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1215             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1216              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1217                 u32 base_val;
1218
1219                 base_val = tp->pci_clock_ctrl;
1220                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1221                              CLOCK_CTRL_TXCLK_DISABLE);
1222
1223                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1224                      CLOCK_CTRL_ALTCLK |
1225                      CLOCK_CTRL_PWRDOWN_PLL133);
1226                 udelay(40);
1227         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1228                 /* do nothing */
1229         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1230                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1231                 u32 newbits1, newbits2;
1232
1233                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1234                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1235                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1236                                     CLOCK_CTRL_TXCLK_DISABLE |
1237                                     CLOCK_CTRL_ALTCLK);
1238                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1239                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1240                         newbits1 = CLOCK_CTRL_625_CORE;
1241                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1242                 } else {
1243                         newbits1 = CLOCK_CTRL_ALTCLK;
1244                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1245                 }
1246
1247                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1248                 udelay(40);
1249
1250                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1251                 udelay(40);
1252
1253                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1254                         u32 newbits3;
1255
1256                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1257                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1258                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1259                                             CLOCK_CTRL_TXCLK_DISABLE |
1260                                             CLOCK_CTRL_44MHZ_CORE);
1261                         } else {
1262                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1263                         }
1264
1265                         tw32_f(TG3PCI_CLOCK_CTRL,
1266                                          tp->pci_clock_ctrl | newbits3);
1267                         udelay(40);
1268                 }
1269         }
1270
1271         tg3_frob_aux_power(tp);
1272
1273         /* Workaround for unstable PLL clock */
1274         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1275             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1276                 u32 val = tr32(0x7d00);
1277
1278                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1279                 tw32(0x7d00, val);
1280                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1281                         tg3_halt_cpu(tp, RX_CPU_BASE);
1282         }
1283
1284         /* Finally, set the new power state. */
1285         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1286         udelay(100);    /* Delay after power state change */
1287
1288         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1289
1290         return 0;
1291 }
1292
1293 static void tg3_link_report(struct tg3 *tp)
1294 {
1295         if (!netif_carrier_ok(tp->dev)) {
1296                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1297         } else {
1298                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1299                        tp->dev->name,
1300                        (tp->link_config.active_speed == SPEED_1000 ?
1301                         1000 :
1302                         (tp->link_config.active_speed == SPEED_100 ?
1303                          100 : 10)),
1304                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1305                         "full" : "half"));
1306
1307                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1308                        "%s for RX.\n",
1309                        tp->dev->name,
1310                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1311                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1312         }
1313 }
1314
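/* Resolve RX/TX flow control from the local and remote autoneg words.
 * The logic below implements the usual 802.3 pause resolution:
 *
 *      local CAP,        remote CAP          -> RX and TX pause
 *      local CAP + ASYM, remote ASYM only    -> RX pause only
 *      local ASYM only,  remote CAP + ASYM   -> TX pause only
 *      otherwise                             -> no pause frames
 *
 * When running on a MII serdes, the 1000BaseX pause bits are first
 * converted to their 1000BaseT equivalents.
 */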
1315 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1316 {
1317         u32 new_tg3_flags = 0;
1318         u32 old_rx_mode = tp->rx_mode;
1319         u32 old_tx_mode = tp->tx_mode;
1320
1321         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1322
1323                 /* Convert 1000BaseX flow control bits to 1000BaseT
1324                  * bits before resolving flow control.
1325                  */
1326                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1327                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1328                                        ADVERTISE_PAUSE_ASYM);
1329                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1330
1331                         if (local_adv & ADVERTISE_1000XPAUSE)
1332                                 local_adv |= ADVERTISE_PAUSE_CAP;
1333                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1334                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1335                         if (remote_adv & LPA_1000XPAUSE)
1336                                 remote_adv |= LPA_PAUSE_CAP;
1337                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1338                                 remote_adv |= LPA_PAUSE_ASYM;
1339                 }
1340
1341                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1342                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1343                                 if (remote_adv & LPA_PAUSE_CAP)
1344                                         new_tg3_flags |=
1345                                                 (TG3_FLAG_RX_PAUSE |
1346                                                 TG3_FLAG_TX_PAUSE);
1347                                 else if (remote_adv & LPA_PAUSE_ASYM)
1348                                         new_tg3_flags |=
1349                                                 (TG3_FLAG_RX_PAUSE);
1350                         } else {
1351                                 if (remote_adv & LPA_PAUSE_CAP)
1352                                         new_tg3_flags |=
1353                                                 (TG3_FLAG_RX_PAUSE |
1354                                                 TG3_FLAG_TX_PAUSE);
1355                         }
1356                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1357                         if ((remote_adv & LPA_PAUSE_CAP) &&
1358                             (remote_adv & LPA_PAUSE_ASYM))
1359                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1360                 }
1361
1362                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1363                 tp->tg3_flags |= new_tg3_flags;
1364         } else {
1365                 new_tg3_flags = tp->tg3_flags;
1366         }
1367
1368         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1369                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1370         else
1371                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1372
1373         if (old_rx_mode != tp->rx_mode) {
1374                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1375         }
1376         
1377         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1378                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1379         else
1380                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1381
1382         if (old_tx_mode != tp->tx_mode) {
1383                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1384         }
1385 }
1386
1387 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1388 {
1389         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1390         case MII_TG3_AUX_STAT_10HALF:
1391                 *speed = SPEED_10;
1392                 *duplex = DUPLEX_HALF;
1393                 break;
1394
1395         case MII_TG3_AUX_STAT_10FULL:
1396                 *speed = SPEED_10;
1397                 *duplex = DUPLEX_FULL;
1398                 break;
1399
1400         case MII_TG3_AUX_STAT_100HALF:
1401                 *speed = SPEED_100;
1402                 *duplex = DUPLEX_HALF;
1403                 break;
1404
1405         case MII_TG3_AUX_STAT_100FULL:
1406                 *speed = SPEED_100;
1407                 *duplex = DUPLEX_FULL;
1408                 break;
1409
1410         case MII_TG3_AUX_STAT_1000HALF:
1411                 *speed = SPEED_1000;
1412                 *duplex = DUPLEX_HALF;
1413                 break;
1414
1415         case MII_TG3_AUX_STAT_1000FULL:
1416                 *speed = SPEED_1000;
1417                 *duplex = DUPLEX_FULL;
1418                 break;
1419
1420         default:
1421                 *speed = SPEED_INVALID;
1422                 *duplex = DUPLEX_INVALID;
1423                 break;
1424         }
1425 }
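/* Usage sketch (illustrative only, never compiled): the decoder above is fed
 * the Broadcom auxiliary status register and yields the resolved speed and
 * duplex, or the *_INVALID values if the PHY has not resolved a link yet.
 */
#if 0
static void tg3_query_link_sketch(struct tg3 *tp)
{
        u32 aux;
        u16 speed;
        u8 duplex;

        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux))
                tg3_aux_stat_to_speed_duplex(tp, aux, &speed, &duplex);
}
#endif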
1426
1427 static void tg3_phy_copper_begin(struct tg3 *tp)
1428 {
1429         u32 new_adv;
1430         int i;
1431
1432         if (tp->link_config.phy_is_low_power) {
1433                 /* Entering low power mode.  Disable gigabit and
1434                  * 100baseT advertisements.
1435                  */
1436                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1437
1438                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1439                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1440                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1441                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1442
1443                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1444         } else if (tp->link_config.speed == SPEED_INVALID) {
1445                 tp->link_config.advertising =
1446                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1447                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1448                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1449                          ADVERTISED_Autoneg | ADVERTISED_MII);
1450
1451                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1452                         tp->link_config.advertising &=
1453                                 ~(ADVERTISED_1000baseT_Half |
1454                                   ADVERTISED_1000baseT_Full);
1455
1456                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1457                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1458                         new_adv |= ADVERTISE_10HALF;
1459                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1460                         new_adv |= ADVERTISE_10FULL;
1461                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1462                         new_adv |= ADVERTISE_100HALF;
1463                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1464                         new_adv |= ADVERTISE_100FULL;
1465                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1466
1467                 if (tp->link_config.advertising &
1468                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1469                         new_adv = 0;
1470                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1471                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1472                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1473                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1474                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1475                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1476                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1477                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1478                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1479                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1480                 } else {
1481                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1482                 }
1483         } else {
1484                 /* Asking for a specific link mode. */
1485                 if (tp->link_config.speed == SPEED_1000) {
1486                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1487                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1488
1489                         if (tp->link_config.duplex == DUPLEX_FULL)
1490                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1491                         else
1492                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1493                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1494                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1495                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1496                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1497                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1498                 } else {
1499                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1500
1501                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1502                         if (tp->link_config.speed == SPEED_100) {
1503                                 if (tp->link_config.duplex == DUPLEX_FULL)
1504                                         new_adv |= ADVERTISE_100FULL;
1505                                 else
1506                                         new_adv |= ADVERTISE_100HALF;
1507                         } else {
1508                                 if (tp->link_config.duplex == DUPLEX_FULL)
1509                                         new_adv |= ADVERTISE_10FULL;
1510                                 else
1511                                         new_adv |= ADVERTISE_10HALF;
1512                         }
1513                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1514                 }
1515         }
1516
1517         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1518             tp->link_config.speed != SPEED_INVALID) {
1519                 u32 bmcr, orig_bmcr;
1520
1521                 tp->link_config.active_speed = tp->link_config.speed;
1522                 tp->link_config.active_duplex = tp->link_config.duplex;
1523
1524                 bmcr = 0;
1525                 switch (tp->link_config.speed) {
1526                 default:
1527                 case SPEED_10:
1528                         break;
1529
1530                 case SPEED_100:
1531                         bmcr |= BMCR_SPEED100;
1532                         break;
1533
1534                 case SPEED_1000:
1535                         bmcr |= TG3_BMCR_SPEED1000;
1536                         break;
1537                 }
1538
1539                 if (tp->link_config.duplex == DUPLEX_FULL)
1540                         bmcr |= BMCR_FULLDPLX;
1541
1542                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1543                     (bmcr != orig_bmcr)) {
1544                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1545                         for (i = 0; i < 1500; i++) {
1546                                 u32 tmp;
1547
1548                                 udelay(10);
1549                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1550                                     tg3_readphy(tp, MII_BMSR, &tmp))
1551                                         continue;
1552                                 if (!(tmp & BMSR_LSTATUS)) {
1553                                         udelay(40);
1554                                         break;
1555                                 }
1556                         }
1557                         tg3_writephy(tp, MII_BMCR, bmcr);
1558                         udelay(40);
1559                 }
1560         } else {
1561                 tg3_writephy(tp, MII_BMCR,
1562                              BMCR_ANENABLE | BMCR_ANRESTART);
1563         }
1564 }
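/* Note: tg3_phy_copper_begin() above covers three cases: (1) entering low
 * power mode, where only 10Mb (plus 100Mb when WOL needs it) is advertised;
 * (2) autonegotiation with no specific speed requested, where everything the
 * chip supports is advertised; and (3) a forced link mode.  As a worked
 * example of case (3), forcing 100/Full with autoneg disabled ends up writing
 * ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL to MII_ADVERTISE
 * and BMCR_SPEED100 | BMCR_FULLDPLX to MII_BMCR, after briefly dropping the
 * link via BMCR_LOOPBACK so the partner sees the change.
 */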
1565
1566 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1567 {
1568         int err;
1569
1570         /* Turn off tap power management. */
1571         /* Set Extended packet length bit */
1572         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1573
1574         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1575         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1576
1577         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1578         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1579
1580         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1581         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1582
1583         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1584         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1585
1586         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1587         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1588
1589         udelay(40);
1590
1591         return err;
1592 }
1593
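/* Editorial note: the helper below reports whether the PHY is currently
 * advertising every speed/duplex combination the chip supports.  The copper
 * link-setup path uses it to detect a stale (e.g. low-power) advertisement
 * and force an autoneg restart in that case.
 */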
1594 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1595 {
1596         u32 adv_reg, all_mask;
1597
1598         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1599                 return 0;
1600
1601         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1602                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1603         if ((adv_reg & all_mask) != all_mask)
1604                 return 0;
1605         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1606                 u32 tg3_ctrl;
1607
1608                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1609                         return 0;
1610
1611                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1612                             MII_TG3_CTRL_ADV_1000_FULL);
1613                 if ((tg3_ctrl & all_mask) != all_mask)
1614                         return 0;
1615         }
1616         return 1;
1617 }
1618
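/* Overview (editorial): tg3_setup_copper_phy() below re-evaluates the copper
 * link.  It clears stale MAC/PHY status, optionally resets the PHY (some
 * third-party PHYs need it on link loss), polls BMSR for link, decodes the
 * negotiated speed/duplex from the aux status register, resolves flow
 * control, and finally reprograms MAC_MODE and the carrier state to match.
 * If the link is down it falls through to tg3_phy_copper_begin() to start a
 * fresh (re)negotiation.
 */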
1619 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1620 {
1621         int current_link_up;
1622         u32 bmsr, dummy;
1623         u16 current_speed;
1624         u8 current_duplex;
1625         int i, err;
1626
1627         tw32(MAC_EVENT, 0);
1628
1629         tw32_f(MAC_STATUS,
1630              (MAC_STATUS_SYNC_CHANGED |
1631               MAC_STATUS_CFG_CHANGED |
1632               MAC_STATUS_MI_COMPLETION |
1633               MAC_STATUS_LNKSTATE_CHANGED));
1634         udelay(40);
1635
1636         tp->mi_mode = MAC_MI_MODE_BASE;
1637         tw32_f(MAC_MI_MODE, tp->mi_mode);
1638         udelay(80);
1639
1640         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1641
1642         /* Some third-party PHYs need to be reset on link going
1643          * down.
1644          */
1645         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1646              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1647              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1648             netif_carrier_ok(tp->dev)) {
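                /* BMSR latches link-down events, so it is read twice
                 * throughout this function: the first read clears the
                 * latched value, the second reflects the current state.
                 */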
1649                 tg3_readphy(tp, MII_BMSR, &bmsr);
1650                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1651                     !(bmsr & BMSR_LSTATUS))
1652                         force_reset = 1;
1653         }
1654         if (force_reset)
1655                 tg3_phy_reset(tp);
1656
1657         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1658                 tg3_readphy(tp, MII_BMSR, &bmsr);
1659                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1660                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1661                         bmsr = 0;
1662
1663                 if (!(bmsr & BMSR_LSTATUS)) {
1664                         err = tg3_init_5401phy_dsp(tp);
1665                         if (err)
1666                                 return err;
1667
1668                         tg3_readphy(tp, MII_BMSR, &bmsr);
1669                         for (i = 0; i < 1000; i++) {
1670                                 udelay(10);
1671                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1672                                     (bmsr & BMSR_LSTATUS)) {
1673                                         udelay(40);
1674                                         break;
1675                                 }
1676                         }
1677
1678                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1679                             !(bmsr & BMSR_LSTATUS) &&
1680                             tp->link_config.active_speed == SPEED_1000) {
1681                                 err = tg3_phy_reset(tp);
1682                                 if (!err)
1683                                         err = tg3_init_5401phy_dsp(tp);
1684                                 if (err)
1685                                         return err;
1686                         }
1687                 }
1688         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1689                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1690                 /* 5701 {A0,B0} CRC bug workaround */
1691                 tg3_writephy(tp, 0x15, 0x0a75);
1692                 tg3_writephy(tp, 0x1c, 0x8c68);
1693                 tg3_writephy(tp, 0x1c, 0x8d68);
1694                 tg3_writephy(tp, 0x1c, 0x8c68);
1695         }
1696
1697         /* Clear pending interrupts... */
1698         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1699         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1700
1701         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1702                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1703         else
1704                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1705
1706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1708                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1709                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1710                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1711                 else
1712                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1713         }
1714
1715         current_link_up = 0;
1716         current_speed = SPEED_INVALID;
1717         current_duplex = DUPLEX_INVALID;
1718
1719         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1720                 u32 val;
1721
1722                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1723                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1724                 if (!(val & (1 << 10))) {
1725                         val |= (1 << 10);
1726                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1727                         goto relink;
1728                 }
1729         }
1730
1731         bmsr = 0;
1732         for (i = 0; i < 100; i++) {
1733                 tg3_readphy(tp, MII_BMSR, &bmsr);
1734                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1735                     (bmsr & BMSR_LSTATUS))
1736                         break;
1737                 udelay(40);
1738         }
1739
1740         if (bmsr & BMSR_LSTATUS) {
1741                 u32 aux_stat, bmcr;
1742
1743                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1744                 for (i = 0; i < 2000; i++) {
1745                         udelay(10);
1746                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1747                             aux_stat)
1748                                 break;
1749                 }
1750
1751                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1752                                              &current_speed,
1753                                              &current_duplex);
1754
1755                 bmcr = 0;
1756                 for (i = 0; i < 200; i++) {
1757                         tg3_readphy(tp, MII_BMCR, &bmcr);
1758                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1759                                 continue;
1760                         if (bmcr && bmcr != 0x7fff)
1761                                 break;
1762                         udelay(10);
1763                 }
1764
1765                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1766                         if (bmcr & BMCR_ANENABLE) {
1767                                 current_link_up = 1;
1768
1769                                 /* Force autoneg restart if we are exiting
1770                                  * low power mode.
1771                                  */
1772                                 if (!tg3_copper_is_advertising_all(tp))
1773                                         current_link_up = 0;
1774                         } else {
1775                                 current_link_up = 0;
1776                         }
1777                 } else {
1778                         if (!(bmcr & BMCR_ANENABLE) &&
1779                             tp->link_config.speed == current_speed &&
1780                             tp->link_config.duplex == current_duplex) {
1781                                 current_link_up = 1;
1782                         } else {
1783                                 current_link_up = 0;
1784                         }
1785                 }
1786
1787                 tp->link_config.active_speed = current_speed;
1788                 tp->link_config.active_duplex = current_duplex;
1789         }
1790
1791         if (current_link_up == 1 &&
1792             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1793             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1794                 u32 local_adv, remote_adv;
1795
1796                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1797                         local_adv = 0;
1798                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1799
1800                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1801                         remote_adv = 0;
1802
1803                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1804
1805                 /* If we are not advertising full pause capability,
1806                  * something is wrong.  Bring the link down and reconfigure.
1807                  */
1808                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1809                         current_link_up = 0;
1810                 } else {
1811                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1812                 }
1813         }
1814 relink:
1815         if (current_link_up == 0) {
1816                 u32 tmp;
1817
1818                 tg3_phy_copper_begin(tp);
1819
1820                 tg3_readphy(tp, MII_BMSR, &tmp);
1821                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1822                     (tmp & BMSR_LSTATUS))
1823                         current_link_up = 1;
1824         }
1825
1826         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1827         if (current_link_up == 1) {
1828                 if (tp->link_config.active_speed == SPEED_100 ||
1829                     tp->link_config.active_speed == SPEED_10)
1830                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1831                 else
1832                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1833         } else
1834                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1835
1836         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1837         if (tp->link_config.active_duplex == DUPLEX_HALF)
1838                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1839
1840         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1842                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1843                     (current_link_up == 1 &&
1844                      tp->link_config.active_speed == SPEED_10))
1845                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1846         } else {
1847                 if (current_link_up == 1)
1848                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1849         }
1850
1851         /* ??? Without this setting Netgear GA302T PHY does not
1852          * ??? send/receive packets...
1853          */
1854         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1855             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1856                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1857                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1858                 udelay(80);
1859         }
1860
1861         tw32_f(MAC_MODE, tp->mac_mode);
1862         udelay(40);
1863
1864         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1865                 /* Polled via timer. */
1866                 tw32_f(MAC_EVENT, 0);
1867         } else {
1868                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1869         }
1870         udelay(40);
1871
1872         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1873             current_link_up == 1 &&
1874             tp->link_config.active_speed == SPEED_1000 &&
1875             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1876              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1877                 udelay(120);
1878                 tw32_f(MAC_STATUS,
1879                      (MAC_STATUS_SYNC_CHANGED |
1880                       MAC_STATUS_CFG_CHANGED));
1881                 udelay(40);
1882                 tg3_write_mem(tp,
1883                               NIC_SRAM_FIRMWARE_MBOX,
1884                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1885         }
1886
1887         if (current_link_up != netif_carrier_ok(tp->dev)) {
1888                 if (current_link_up)
1889                         netif_carrier_on(tp->dev);
1890                 else
1891                         netif_carrier_off(tp->dev);
1892                 tg3_link_report(tp);
1893         }
1894
1895         return 0;
1896 }
1897
1898 struct tg3_fiber_aneginfo {
1899         int state;
1900 #define ANEG_STATE_UNKNOWN              0
1901 #define ANEG_STATE_AN_ENABLE            1
1902 #define ANEG_STATE_RESTART_INIT         2
1903 #define ANEG_STATE_RESTART              3
1904 #define ANEG_STATE_DISABLE_LINK_OK      4
1905 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1906 #define ANEG_STATE_ABILITY_DETECT       6
1907 #define ANEG_STATE_ACK_DETECT_INIT      7
1908 #define ANEG_STATE_ACK_DETECT           8
1909 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1910 #define ANEG_STATE_COMPLETE_ACK         10
1911 #define ANEG_STATE_IDLE_DETECT_INIT     11
1912 #define ANEG_STATE_IDLE_DETECT          12
1913 #define ANEG_STATE_LINK_OK              13
1914 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1915 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1916
1917         u32 flags;
1918 #define MR_AN_ENABLE            0x00000001
1919 #define MR_RESTART_AN           0x00000002
1920 #define MR_AN_COMPLETE          0x00000004
1921 #define MR_PAGE_RX              0x00000008
1922 #define MR_NP_LOADED            0x00000010
1923 #define MR_TOGGLE_TX            0x00000020
1924 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1925 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1926 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1927 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1928 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1929 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1930 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1931 #define MR_TOGGLE_RX            0x00002000
1932 #define MR_NP_RX                0x00004000
1933
1934 #define MR_LINK_OK              0x80000000
1935
1936         unsigned long link_time, cur_time;
1937
1938         u32 ability_match_cfg;
1939         int ability_match_count;
1940
1941         char ability_match, idle_match, ack_match;
1942
1943         u32 txconfig, rxconfig;
1944 #define ANEG_CFG_NP             0x00000080
1945 #define ANEG_CFG_ACK            0x00000040
1946 #define ANEG_CFG_RF2            0x00000020
1947 #define ANEG_CFG_RF1            0x00000010
1948 #define ANEG_CFG_PS2            0x00000001
1949 #define ANEG_CFG_PS1            0x00008000
1950 #define ANEG_CFG_HD             0x00004000
1951 #define ANEG_CFG_FD             0x00002000
1952 #define ANEG_CFG_INVAL          0x00001f06
1953
1954 };
1955 #define ANEG_OK         0
1956 #define ANEG_DONE       1
1957 #define ANEG_TIMER_ENAB 2
1958 #define ANEG_FAILED     -1
1959
1960 #define ANEG_STATE_SETTLE_TIME  10000
1961
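/* Editorial note: the state machine below is a software implementation of
 * IEEE 802.3 Clause 37 (1000BASE-X) auto-negotiation arbitration.  It is
 * stepped roughly once per microsecond by fiber_autoneg(); config words are
 * exchanged through the MAC_TX_AUTO_NEG / MAC_RX_AUTO_NEG registers, and
 * ANEG_STATE_SETTLE_TIME ticks (~10ms) are allowed for the link partner to
 * settle between states.
 */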
1962 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1963                                    struct tg3_fiber_aneginfo *ap)
1964 {
1965         unsigned long delta;
1966         u32 rx_cfg_reg;
1967         int ret;
1968
1969         if (ap->state == ANEG_STATE_UNKNOWN) {
1970                 ap->rxconfig = 0;
1971                 ap->link_time = 0;
1972                 ap->cur_time = 0;
1973                 ap->ability_match_cfg = 0;
1974                 ap->ability_match_count = 0;
1975                 ap->ability_match = 0;
1976                 ap->idle_match = 0;
1977                 ap->ack_match = 0;
1978         }
1979         ap->cur_time++;
1980
1981         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1982                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1983
1984                 if (rx_cfg_reg != ap->ability_match_cfg) {
1985                         ap->ability_match_cfg = rx_cfg_reg;
1986                         ap->ability_match = 0;
1987                         ap->ability_match_count = 0;
1988                 } else {
1989                         if (++ap->ability_match_count > 1) {
1990                                 ap->ability_match = 1;
1991                                 ap->ability_match_cfg = rx_cfg_reg;
1992                         }
1993                 }
1994                 if (rx_cfg_reg & ANEG_CFG_ACK)
1995                         ap->ack_match = 1;
1996                 else
1997                         ap->ack_match = 0;
1998
1999                 ap->idle_match = 0;
2000         } else {
2001                 ap->idle_match = 1;
2002                 ap->ability_match_cfg = 0;
2003                 ap->ability_match_count = 0;
2004                 ap->ability_match = 0;
2005                 ap->ack_match = 0;
2006
2007                 rx_cfg_reg = 0;
2008         }
2009
2010         ap->rxconfig = rx_cfg_reg;
2011         ret = ANEG_OK;
2012
2013         switch(ap->state) {
2014         case ANEG_STATE_UNKNOWN:
2015                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2016                         ap->state = ANEG_STATE_AN_ENABLE;
2017
2018                 /* fallthru */
2019         case ANEG_STATE_AN_ENABLE:
2020                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2021                 if (ap->flags & MR_AN_ENABLE) {
2022                         ap->link_time = 0;
2023                         ap->cur_time = 0;
2024                         ap->ability_match_cfg = 0;
2025                         ap->ability_match_count = 0;
2026                         ap->ability_match = 0;
2027                         ap->idle_match = 0;
2028                         ap->ack_match = 0;
2029
2030                         ap->state = ANEG_STATE_RESTART_INIT;
2031                 } else {
2032                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2033                 }
2034                 break;
2035
2036         case ANEG_STATE_RESTART_INIT:
2037                 ap->link_time = ap->cur_time;
2038                 ap->flags &= ~(MR_NP_LOADED);
2039                 ap->txconfig = 0;
2040                 tw32(MAC_TX_AUTO_NEG, 0);
2041                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2042                 tw32_f(MAC_MODE, tp->mac_mode);
2043                 udelay(40);
2044
2045                 ret = ANEG_TIMER_ENAB;
2046                 ap->state = ANEG_STATE_RESTART;
2047
2048                 /* fallthru */
2049         case ANEG_STATE_RESTART:
2050                 delta = ap->cur_time - ap->link_time;
2051                 if (delta > ANEG_STATE_SETTLE_TIME) {
2052                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2053                 } else {
2054                         ret = ANEG_TIMER_ENAB;
2055                 }
2056                 break;
2057
2058         case ANEG_STATE_DISABLE_LINK_OK:
2059                 ret = ANEG_DONE;
2060                 break;
2061
2062         case ANEG_STATE_ABILITY_DETECT_INIT:
2063                 ap->flags &= ~(MR_TOGGLE_TX);
2064                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2065                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2066                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2067                 tw32_f(MAC_MODE, tp->mac_mode);
2068                 udelay(40);
2069
2070                 ap->state = ANEG_STATE_ABILITY_DETECT;
2071                 break;
2072
2073         case ANEG_STATE_ABILITY_DETECT:
2074                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2075                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2076                 }
2077                 break;
2078
2079         case ANEG_STATE_ACK_DETECT_INIT:
2080                 ap->txconfig |= ANEG_CFG_ACK;
2081                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2082                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2083                 tw32_f(MAC_MODE, tp->mac_mode);
2084                 udelay(40);
2085
2086                 ap->state = ANEG_STATE_ACK_DETECT;
2087
2088                 /* fallthru */
2089         case ANEG_STATE_ACK_DETECT:
2090                 if (ap->ack_match != 0) {
2091                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2092                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2093                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2094                         } else {
2095                                 ap->state = ANEG_STATE_AN_ENABLE;
2096                         }
2097                 } else if (ap->ability_match != 0 &&
2098                            ap->rxconfig == 0) {
2099                         ap->state = ANEG_STATE_AN_ENABLE;
2100                 }
2101                 break;
2102
2103         case ANEG_STATE_COMPLETE_ACK_INIT:
2104                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2105                         ret = ANEG_FAILED;
2106                         break;
2107                 }
2108                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2109                                MR_LP_ADV_HALF_DUPLEX |
2110                                MR_LP_ADV_SYM_PAUSE |
2111                                MR_LP_ADV_ASYM_PAUSE |
2112                                MR_LP_ADV_REMOTE_FAULT1 |
2113                                MR_LP_ADV_REMOTE_FAULT2 |
2114                                MR_LP_ADV_NEXT_PAGE |
2115                                MR_TOGGLE_RX |
2116                                MR_NP_RX);
2117                 if (ap->rxconfig & ANEG_CFG_FD)
2118                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2119                 if (ap->rxconfig & ANEG_CFG_HD)
2120                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2121                 if (ap->rxconfig & ANEG_CFG_PS1)
2122                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2123                 if (ap->rxconfig & ANEG_CFG_PS2)
2124                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2125                 if (ap->rxconfig & ANEG_CFG_RF1)
2126                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2127                 if (ap->rxconfig & ANEG_CFG_RF2)
2128                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2129                 if (ap->rxconfig & ANEG_CFG_NP)
2130                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2131
2132                 ap->link_time = ap->cur_time;
2133
2134                 ap->flags ^= (MR_TOGGLE_TX);
2135                 if (ap->rxconfig & 0x0008)
2136                         ap->flags |= MR_TOGGLE_RX;
2137                 if (ap->rxconfig & ANEG_CFG_NP)
2138                         ap->flags |= MR_NP_RX;
2139                 ap->flags |= MR_PAGE_RX;
2140
2141                 ap->state = ANEG_STATE_COMPLETE_ACK;
2142                 ret = ANEG_TIMER_ENAB;
2143                 break;
2144
2145         case ANEG_STATE_COMPLETE_ACK:
2146                 if (ap->ability_match != 0 &&
2147                     ap->rxconfig == 0) {
2148                         ap->state = ANEG_STATE_AN_ENABLE;
2149                         break;
2150                 }
2151                 delta = ap->cur_time - ap->link_time;
2152                 if (delta > ANEG_STATE_SETTLE_TIME) {
2153                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2154                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2155                         } else {
2156                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2157                                     !(ap->flags & MR_NP_RX)) {
2158                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2159                                 } else {
2160                                         ret = ANEG_FAILED;
2161                                 }
2162                         }
2163                 }
2164                 break;
2165
2166         case ANEG_STATE_IDLE_DETECT_INIT:
2167                 ap->link_time = ap->cur_time;
2168                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2169                 tw32_f(MAC_MODE, tp->mac_mode);
2170                 udelay(40);
2171
2172                 ap->state = ANEG_STATE_IDLE_DETECT;
2173                 ret = ANEG_TIMER_ENAB;
2174                 break;
2175
2176         case ANEG_STATE_IDLE_DETECT:
2177                 if (ap->ability_match != 0 &&
2178                     ap->rxconfig == 0) {
2179                         ap->state = ANEG_STATE_AN_ENABLE;
2180                         break;
2181                 }
2182                 delta = ap->cur_time - ap->link_time;
2183                 if (delta > ANEG_STATE_SETTLE_TIME) {
2184                         /* XXX another gem from the Broadcom driver :( */
2185                         ap->state = ANEG_STATE_LINK_OK;
2186                 }
2187                 break;
2188
2189         case ANEG_STATE_LINK_OK:
2190                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2191                 ret = ANEG_DONE;
2192                 break;
2193
2194         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2195                 /* ??? unimplemented */
2196                 break;
2197
2198         case ANEG_STATE_NEXT_PAGE_WAIT:
2199                 /* ??? unimplemented */
2200                 break;
2201
2202         default:
2203                 ret = ANEG_FAILED;
2204                 break;
2205         }
2206
2207         return ret;
2208 }
2209
2210 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2211 {
2212         int res = 0;
2213         struct tg3_fiber_aneginfo aninfo;
2214         int status = ANEG_FAILED;
2215         unsigned int tick;
2216         u32 tmp;
2217
2218         tw32_f(MAC_TX_AUTO_NEG, 0);
2219
2220         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2221         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2222         udelay(40);
2223
2224         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2225         udelay(40);
2226
2227         memset(&aninfo, 0, sizeof(aninfo));
2228         aninfo.flags |= MR_AN_ENABLE;
2229         aninfo.state = ANEG_STATE_UNKNOWN;
2230         aninfo.cur_time = 0;
2231         tick = 0;
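        /* Step the Clause 37 state machine by hand, one tick per
         * microsecond, giving autoneg roughly 195ms to complete.
         */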
2232         while (++tick < 195000) {
2233                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2234                 if (status == ANEG_DONE || status == ANEG_FAILED)
2235                         break;
2236
2237                 udelay(1);
2238         }
2239
2240         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2241         tw32_f(MAC_MODE, tp->mac_mode);
2242         udelay(40);
2243
2244         *flags = aninfo.flags;
2245
2246         if (status == ANEG_DONE &&
2247             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2248                              MR_LP_ADV_FULL_DUPLEX)))
2249                 res = 1;
2250
2251         return res;
2252 }
2253
2254 static void tg3_init_bcm8002(struct tg3 *tp)
2255 {
2256         u32 mac_status = tr32(MAC_STATUS);
2257         int i;
2258
2259         /* Reset when initializing for the first time or when we have a link. */
2260         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2261             !(mac_status & MAC_STATUS_PCS_SYNCED))
2262                 return;
2263
2264         /* Set PLL lock range. */
2265         tg3_writephy(tp, 0x16, 0x8007);
2266
2267         /* SW reset */
2268         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2269
2270         /* Wait for reset to complete. */
2271         /* XXX schedule_timeout() ... */
2272         for (i = 0; i < 500; i++)
2273                 udelay(10);
2274
2275         /* Config mode; select PMA/Ch 1 regs. */
2276         tg3_writephy(tp, 0x10, 0x8411);
2277
2278         /* Enable auto-lock and comdet, select txclk for tx. */
2279         tg3_writephy(tp, 0x11, 0x0a10);
2280
2281         tg3_writephy(tp, 0x18, 0x00a0);
2282         tg3_writephy(tp, 0x16, 0x41ff);
2283
2284         /* Assert and deassert POR. */
2285         tg3_writephy(tp, 0x13, 0x0400);
2286         udelay(40);
2287         tg3_writephy(tp, 0x13, 0x0000);
2288
2289         tg3_writephy(tp, 0x11, 0x0a50);
2290         udelay(40);
2291         tg3_writephy(tp, 0x11, 0x0a10);
2292
2293         /* Wait for signal to stabilize */
2294         /* XXX schedule_timeout() ... */
2295         for (i = 0; i < 15000; i++)
2296                 udelay(10);
2297
2298         /* Deselect the channel register so we can read the PHYID
2299          * later.
2300          */
2301         tg3_writephy(tp, 0x10, 0x8011);
2302 }
2303
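/* Editorial note: on chips with a hardware SERDES autoneg block, link setup
 * goes through the SG_DIG_CTRL/SG_DIG_STATUS registers rather than the
 * software state machine above.  The expected_sg_dig_ctrl value built below
 * requests autoneg with symmetric (bit 11) and asymmetric (bit 12) pause
 * advertisement; parts other than 5704 A0/A1 additionally apply a
 * MAC_SERDES_CFG workaround around mode changes.
 */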
2304 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2305 {
2306         u32 sg_dig_ctrl, sg_dig_status;
2307         u32 serdes_cfg, expected_sg_dig_ctrl;
2308         int workaround, port_a;
2309         int current_link_up;
2310
2311         serdes_cfg = 0;
2312         expected_sg_dig_ctrl = 0;
2313         workaround = 0;
2314         port_a = 1;
2315         current_link_up = 0;
2316
2317         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2318             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2319                 workaround = 1;
2320                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2321                         port_a = 0;
2322
2323                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2324                 /* preserve bits 20-23 for voltage regulator */
2325                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2326         }
2327
2328         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2329
2330         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2331                 if (sg_dig_ctrl & (1 << 31)) {
2332                         if (workaround) {
2333                                 u32 val = serdes_cfg;
2334
2335                                 if (port_a)
2336                                         val |= 0xc010000;
2337                                 else
2338                                         val |= 0x4010000;
2339                                 tw32_f(MAC_SERDES_CFG, val);
2340                         }
2341                         tw32_f(SG_DIG_CTRL, 0x01388400);
2342                 }
2343                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2344                         tg3_setup_flow_control(tp, 0, 0);
2345                         current_link_up = 1;
2346                 }
2347                 goto out;
2348         }
2349
2350         /* Want auto-negotiation.  */
2351         expected_sg_dig_ctrl = 0x81388400;
2352
2353         /* Pause capability */
2354         expected_sg_dig_ctrl |= (1 << 11);
2355
2356                 /* Asymmetric pause */
2357         expected_sg_dig_ctrl |= (1 << 12);
2358
2359         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2360                 if (workaround)
2361                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2362                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2363                 udelay(5);
2364                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2365
2366                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2367         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2368                                  MAC_STATUS_SIGNAL_DET)) {
2369                 int i;
2370
2371                 /* Give time to negotiate (~200ms) */
2372                 for (i = 0; i < 40000; i++) {
2373                         sg_dig_status = tr32(SG_DIG_STATUS);
2374                         if (sg_dig_status & (0x3))
2375                                 break;
2376                         udelay(5);
2377                 }
2378                 mac_status = tr32(MAC_STATUS);
2379
2380                 if ((sg_dig_status & (1 << 1)) &&
2381                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2382                         u32 local_adv, remote_adv;
2383
2384                         local_adv = ADVERTISE_PAUSE_CAP;
2385                         remote_adv = 0;
2386                         if (sg_dig_status & (1 << 19))
2387                                 remote_adv |= LPA_PAUSE_CAP;
2388                         if (sg_dig_status & (1 << 20))
2389                                 remote_adv |= LPA_PAUSE_ASYM;
2390
2391                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2392                         current_link_up = 1;
2393                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2394                 } else if (!(sg_dig_status & (1 << 1))) {
2395                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2396                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2397                         else {
2398                                 if (workaround) {
2399                                         u32 val = serdes_cfg;
2400
2401                                         if (port_a)
2402                                                 val |= 0xc010000;
2403                                         else
2404                                                 val |= 0x4010000;
2405
2406                                         tw32_f(MAC_SERDES_CFG, val);
2407                                 }
2408
2409                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2410                                 udelay(40);
2411
2412                                 /* Link parallel detection - link is up
2413                                  * only if we have PCS_SYNC and not
2414                                  * receiving config code words.  */
2415                                 mac_status = tr32(MAC_STATUS);
2416                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2417                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2418                                         tg3_setup_flow_control(tp, 0, 0);
2419                                         current_link_up = 1;
2420                                 }
2421                         }
2422                 }
2423         }
2424
2425 out:
2426         return current_link_up;
2427 }
2428
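/* Editorial note: when the hardware autoneg block is not used, the driver
 * either runs the software Clause 37 state machine via fiber_autoneg() (if
 * autoneg is enabled) or simply forces a 1000/Full link; in the autoneg case
 * flow control is derived from the link partner's advertised pause bits.
 */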
2429 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2430 {
2431         int current_link_up = 0;
2432
2433         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2434                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2435                 goto out;
2436         }
2437
2438         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2439                 u32 flags;
2440                 int i;
2441   
2442                 if (fiber_autoneg(tp, &flags)) {
2443                         u32 local_adv, remote_adv;
2444
2445                         local_adv = ADVERTISE_PAUSE_CAP;
2446                         remote_adv = 0;
2447                         if (flags & MR_LP_ADV_SYM_PAUSE)
2448                                 remote_adv |= LPA_PAUSE_CAP;
2449                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2450                                 remote_adv |= LPA_PAUSE_ASYM;
2451
2452                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2453
2454                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2455                         current_link_up = 1;
2456                 }
2457                 for (i = 0; i < 30; i++) {
2458                         udelay(20);
2459                         tw32_f(MAC_STATUS,
2460                                (MAC_STATUS_SYNC_CHANGED |
2461                                 MAC_STATUS_CFG_CHANGED));
2462                         udelay(40);
2463                         if ((tr32(MAC_STATUS) &
2464                              (MAC_STATUS_SYNC_CHANGED |
2465                               MAC_STATUS_CFG_CHANGED)) == 0)
2466                                 break;
2467                 }
2468
2469                 mac_status = tr32(MAC_STATUS);
2470                 if (current_link_up == 0 &&
2471                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2472                     !(mac_status & MAC_STATUS_RCVD_CFG))
2473                         current_link_up = 1;
2474         } else {
2475                 /* Forcing 1000FD link up. */
2476                 current_link_up = 1;
2477                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2478
2479                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2480                 udelay(40);
2481         }
2482
2483 out:
2484         return current_link_up;
2485 }
2486
2487 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2488 {
2489         u32 orig_pause_cfg;
2490         u16 orig_active_speed;
2491         u8 orig_active_duplex;
2492         u32 mac_status;
2493         int current_link_up;
2494         int i;
2495
2496         orig_pause_cfg =
2497                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2498                                   TG3_FLAG_TX_PAUSE));
2499         orig_active_speed = tp->link_config.active_speed;
2500         orig_active_duplex = tp->link_config.active_duplex;
2501
2502         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2503             netif_carrier_ok(tp->dev) &&
2504             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2505                 mac_status = tr32(MAC_STATUS);
2506                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2507                                MAC_STATUS_SIGNAL_DET |
2508                                MAC_STATUS_CFG_CHANGED |
2509                                MAC_STATUS_RCVD_CFG);
2510                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2511                                    MAC_STATUS_SIGNAL_DET)) {
2512                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2513                                             MAC_STATUS_CFG_CHANGED));
2514                         return 0;
2515                 }
2516         }
2517
2518         tw32_f(MAC_TX_AUTO_NEG, 0);
2519
2520         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2521         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2522         tw32_f(MAC_MODE, tp->mac_mode);
2523         udelay(40);
2524
2525         if (tp->phy_id == PHY_ID_BCM8002)
2526                 tg3_init_bcm8002(tp);
2527
2528         /* Enable link change events even when polling the serdes. */
2529         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2530         udelay(40);
2531
2532         current_link_up = 0;
2533         mac_status = tr32(MAC_STATUS);
2534
2535         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2536                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2537         else
2538                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2539
2540         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2541         tw32_f(MAC_MODE, tp->mac_mode);
2542         udelay(40);
2543
2544         tp->hw_status->status =
2545                 (SD_STATUS_UPDATED |
2546                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2547
2548         for (i = 0; i < 100; i++) {
2549                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2550                                     MAC_STATUS_CFG_CHANGED));
2551                 udelay(5);
2552                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2553                                          MAC_STATUS_CFG_CHANGED)) == 0)
2554                         break;
2555         }
2556
2557         mac_status = tr32(MAC_STATUS);
2558         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2559                 current_link_up = 0;
2560                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2561                         tw32_f(MAC_MODE, (tp->mac_mode |
2562                                           MAC_MODE_SEND_CONFIGS));
2563                         udelay(1);
2564                         tw32_f(MAC_MODE, tp->mac_mode);
2565                 }
2566         }
2567
2568         if (current_link_up == 1) {
2569                 tp->link_config.active_speed = SPEED_1000;
2570                 tp->link_config.active_duplex = DUPLEX_FULL;
2571                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2572                                     LED_CTRL_LNKLED_OVERRIDE |
2573                                     LED_CTRL_1000MBPS_ON));
2574         } else {
2575                 tp->link_config.active_speed = SPEED_INVALID;
2576                 tp->link_config.active_duplex = DUPLEX_INVALID;
2577                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2578                                     LED_CTRL_LNKLED_OVERRIDE |
2579                                     LED_CTRL_TRAFFIC_OVERRIDE));
2580         }
2581
2582         if (current_link_up != netif_carrier_ok(tp->dev)) {
2583                 if (current_link_up)
2584                         netif_carrier_on(tp->dev);
2585                 else
2586                         netif_carrier_off(tp->dev);
2587                 tg3_link_report(tp);
2588         } else {
2589                 u32 now_pause_cfg =
2590                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2591                                          TG3_FLAG_TX_PAUSE);
2592                 if (orig_pause_cfg != now_pause_cfg ||
2593                     orig_active_speed != tp->link_config.active_speed ||
2594                     orig_active_duplex != tp->link_config.active_duplex)
2595                         tg3_link_report(tp);
2596         }
2597
2598         return 0;
2599 }
2600
2601 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2602 {
2603         int current_link_up, err = 0;
2604         u32 bmsr, bmcr;
2605         u16 current_speed;
2606         u8 current_duplex;
2607
2608         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2609         tw32_f(MAC_MODE, tp->mac_mode);
2610         udelay(40);
2611
2612         tw32(MAC_EVENT, 0);
2613
2614         tw32_f(MAC_STATUS,
2615              (MAC_STATUS_SYNC_CHANGED |
2616               MAC_STATUS_CFG_CHANGED |
2617               MAC_STATUS_MI_COMPLETION |
2618               MAC_STATUS_LNKSTATE_CHANGED));
2619         udelay(40);
2620
2621         if (force_reset)
2622                 tg3_phy_reset(tp);
2623
2624         current_link_up = 0;
2625         current_speed = SPEED_INVALID;
2626         current_duplex = DUPLEX_INVALID;
2627
2628         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2629         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2630
2631         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2632
2633         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2634             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2635                 /* do nothing, just check for link up at the end */
2636         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2637                 u32 adv, new_adv;
2638
2639                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2640                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2641                                   ADVERTISE_1000XPAUSE |
2642                                   ADVERTISE_1000XPSE_ASYM |
2643                                   ADVERTISE_SLCT);
2644
2645                 /* Always advertise symmetric PAUSE just like copper */
2646                 new_adv |= ADVERTISE_1000XPAUSE;
2647
2648                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2649                         new_adv |= ADVERTISE_1000XHALF;
2650                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2651                         new_adv |= ADVERTISE_1000XFULL;
2652
2653                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2654                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2655                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2656                         tg3_writephy(tp, MII_BMCR, bmcr);
2657
2658                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2659                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2660                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2661
2662                         return err;
2663                 }
2664         } else {
2665                 u32 new_bmcr;
2666
2667                 bmcr &= ~BMCR_SPEED1000;
2668                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2669
2670                 if (tp->link_config.duplex == DUPLEX_FULL)
2671                         new_bmcr |= BMCR_FULLDPLX;
2672
2673                 if (new_bmcr != bmcr) {
2674                         /* BMCR_SPEED1000 is a reserved bit that needs
2675                          * to be set on write.
2676                          */
2677                         new_bmcr |= BMCR_SPEED1000;
2678
2679                         /* Force a linkdown */
2680                         if (netif_carrier_ok(tp->dev)) {
2681                                 u32 adv;
2682
2683                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2684                                 adv &= ~(ADVERTISE_1000XFULL |
2685                                          ADVERTISE_1000XHALF |
2686                                          ADVERTISE_SLCT);
2687                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2688                                 tg3_writephy(tp, MII_BMCR, bmcr |
2689                                                            BMCR_ANRESTART |
2690                                                            BMCR_ANENABLE);
2691                                 udelay(10);
2692                                 netif_carrier_off(tp->dev);
2693                         }
2694                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2695                         bmcr = new_bmcr;
2696                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2698                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2699                 }
2700         }
2701
2702         if (bmsr & BMSR_LSTATUS) {
2703                 current_speed = SPEED_1000;
2704                 current_link_up = 1;
2705                 if (bmcr & BMCR_FULLDPLX)
2706                         current_duplex = DUPLEX_FULL;
2707                 else
2708                         current_duplex = DUPLEX_HALF;
2709
2710                 if (bmcr & BMCR_ANENABLE) {
2711                         u32 local_adv, remote_adv, common;
2712
2713                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2714                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2715                         common = local_adv & remote_adv;
2716                         if (common & (ADVERTISE_1000XHALF |
2717                                       ADVERTISE_1000XFULL)) {
2718                                 if (common & ADVERTISE_1000XFULL)
2719                                         current_duplex = DUPLEX_FULL;
2720                                 else
2721                                         current_duplex = DUPLEX_HALF;
2722
2723                                 tg3_setup_flow_control(tp, local_adv,
2724                                                        remote_adv);
2725                         }
2726                         else
2727                                 current_link_up = 0;
2728                 }
2729         }
2730
2731         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2732         if (tp->link_config.active_duplex == DUPLEX_HALF)
2733                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2734
2735         tw32_f(MAC_MODE, tp->mac_mode);
2736         udelay(40);
2737
2738         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2739
2740         tp->link_config.active_speed = current_speed;
2741         tp->link_config.active_duplex = current_duplex;
2742
2743         if (current_link_up != netif_carrier_ok(tp->dev)) {
2744                 if (current_link_up)
2745                         netif_carrier_on(tp->dev);
2746                 else {
2747                         netif_carrier_off(tp->dev);
2748                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2749                 }
2750                 tg3_link_report(tp);
2751         }
2752         return err;
2753 }
2754
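/* Editorial note: "parallel detection" covers link partners that do not
 * autonegotiate.  If autoneg is enabled but the link stays down while the
 * PHY reports signal detect and no incoming config code words, the code
 * below forces 1000/Full and marks TG3_FLG2_PARALLEL_DETECT; if config code
 * words later appear, autoneg is switched back on.
 */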
2755 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2756 {
2757         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2758                 /* Give autoneg time to complete. */
2759                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2760                 return;
2761         }
2762         if (!netif_carrier_ok(tp->dev) &&
2763             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2764                 u32 bmcr;
2765
2766                 tg3_readphy(tp, MII_BMCR, &bmcr);
2767                 if (bmcr & BMCR_ANENABLE) {
2768                         u32 phy1, phy2;
2769
2770                         /* Select shadow register 0x1f */
2771                         tg3_writephy(tp, 0x1c, 0x7c00);
2772                         tg3_readphy(tp, 0x1c, &phy1);
2773
2774                         /* Select expansion interrupt status register */
2775                         tg3_writephy(tp, 0x17, 0x0f01);
2776                         tg3_readphy(tp, 0x15, &phy2);
2777                         tg3_readphy(tp, 0x15, &phy2);
2778
2779                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2780                                 /* We have signal detect and are not receiving
2781                                  * config code words, so the link is up by
2782                                  * parallel detection.
2783                                  */
2784
2785                                 bmcr &= ~BMCR_ANENABLE;
2786                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2787                                 tg3_writephy(tp, MII_BMCR, bmcr);
2788                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2789                         }
2790                 }
2791         }
2792         else if (netif_carrier_ok(tp->dev) &&
2793                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2794                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2795                 u32 phy2;
2796
2797                 /* Select expansion interrupt status register */
2798                 tg3_writephy(tp, 0x17, 0x0f01);
2799                 tg3_readphy(tp, 0x15, &phy2);
2800                 if (phy2 & 0x20) {
2801                         u32 bmcr;
2802
2803                         /* Config code words received, turn on autoneg. */
2804                         tg3_readphy(tp, MII_BMCR, &bmcr);
2805                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2806
2807                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2808
2809                 }
2810         }
2811 }
2812
2813 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2814 {
2815         int err;
2816
2817         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2818                 err = tg3_setup_fiber_phy(tp, force_reset);
2819         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2820                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2821         } else {
2822                 err = tg3_setup_copper_phy(tp, force_reset);
2823         }
2824
2825         if (tp->link_config.active_speed == SPEED_1000 &&
2826             tp->link_config.active_duplex == DUPLEX_HALF)
2827                 tw32(MAC_TX_LENGTHS,
2828                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2829                       (6 << TX_LENGTHS_IPG_SHIFT) |
2830                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2831         else
2832                 tw32(MAC_TX_LENGTHS,
2833                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2834                       (6 << TX_LENGTHS_IPG_SHIFT) |
2835                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2836
2837         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2838                 if (netif_carrier_ok(tp->dev)) {
2839                         tw32(HOSTCC_STAT_COAL_TICKS,
2840                              tp->coal.stats_block_coalesce_usecs);
2841                 } else {
2842                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2843                 }
2844         }
2845
2846         return err;
2847 }
2848
2849 /* Tigon3 never reports partial packet sends.  So we do not
2850  * need special logic to handle SKBs that have not had all
2851  * of their frags sent yet, like SunGEM does.
2852  */
2853 static void tg3_tx(struct tg3 *tp)
2854 {
2855         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2856         u32 sw_idx = tp->tx_cons;
2857
2858         while (sw_idx != hw_idx) {
2859                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2860                 struct sk_buff *skb = ri->skb;
2861                 int i;
2862
2863                 if (unlikely(skb == NULL))
2864                         BUG();
2865
2866                 pci_unmap_single(tp->pdev,
2867                                  pci_unmap_addr(ri, mapping),
2868                                  skb_headlen(skb),
2869                                  PCI_DMA_TODEVICE);
2870
2871                 ri->skb = NULL;
2872
2873                 sw_idx = NEXT_TX(sw_idx);
2874
2875                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2876                         if (unlikely(sw_idx == hw_idx))
2877                                 BUG();
2878
2879                         ri = &tp->tx_buffers[sw_idx];
2880                         if (unlikely(ri->skb != NULL))
2881                                 BUG();
2882
2883                         pci_unmap_page(tp->pdev,
2884                                        pci_unmap_addr(ri, mapping),
2885                                        skb_shinfo(skb)->frags[i].size,
2886                                        PCI_DMA_TODEVICE);
2887
2888                         sw_idx = NEXT_TX(sw_idx);
2889                 }
2890
2891                 dev_kfree_skb(skb);
2892         }
2893
2894         tp->tx_cons = sw_idx;
2895
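        /* tg3_start_xmit() stops the queue under tx_lock, so re-check under
         * the same lock before waking it, to avoid racing with a transmit
         * that is still filling the ring.
         */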
2896         if (unlikely(netif_queue_stopped(tp->dev))) {
2897                 spin_lock(&tp->tx_lock);
2898                 if (netif_queue_stopped(tp->dev) &&
2899                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2900                         netif_wake_queue(tp->dev);
2901                 spin_unlock(&tp->tx_lock);
2902         }
2903 }
2904
2905 /* Returns size of skb allocated or < 0 on error.
2906  *
2907  * We only need to fill in the address because the other members
2908  * of the RX descriptor are invariant, see tg3_init_rings.
2909  *
2910  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2911  * posting buffers we only dirty the first cache line of the RX
2912  * descriptor (containing the address).  Whereas for the RX status
2913  * buffers the cpu only reads the last cacheline of the RX descriptor
2914  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2915  */
2916 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2917                             int src_idx, u32 dest_idx_unmasked)
2918 {
2919         struct tg3_rx_buffer_desc *desc;
2920         struct ring_info *map, *src_map;
2921         struct sk_buff *skb;
2922         dma_addr_t mapping;
2923         int skb_size, dest_idx;
2924
2925         src_map = NULL;
2926         switch (opaque_key) {
2927         case RXD_OPAQUE_RING_STD:
2928                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2929                 desc = &tp->rx_std[dest_idx];
2930                 map = &tp->rx_std_buffers[dest_idx];
2931                 if (src_idx >= 0)
2932                         src_map = &tp->rx_std_buffers[src_idx];
2933                 skb_size = tp->rx_pkt_buf_sz;
2934                 break;
2935
2936         case RXD_OPAQUE_RING_JUMBO:
2937                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2938                 desc = &tp->rx_jumbo[dest_idx];
2939                 map = &tp->rx_jumbo_buffers[dest_idx];
2940                 if (src_idx >= 0)
2941                         src_map = &tp->rx_jumbo_buffers[src_idx];
2942                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2943                 break;
2944
2945         default:
2946                 return -EINVAL;
2947         };
2948
2949         /* Do not overwrite any of the map or rp information
2950          * until we are sure we can commit to a new buffer.
2951          *
2952          * Callers depend upon this behavior and assume that
2953          * we leave everything unchanged if we fail.
2954          */
2955         skb = dev_alloc_skb(skb_size);
2956         if (skb == NULL)
2957                 return -ENOMEM;
2958
2959         skb->dev = tp->dev;
2960         skb_reserve(skb, tp->rx_offset);
2961
2962         mapping = pci_map_single(tp->pdev, skb->data,
2963                                  skb_size - tp->rx_offset,
2964                                  PCI_DMA_FROMDEVICE);
2965
2966         map->skb = skb;
2967         pci_unmap_addr_set(map, mapping, mapping);
2968
2969         if (src_map != NULL)
2970                 src_map->skb = NULL;
2971
2972         desc->addr_hi = ((u64)mapping >> 32);
2973         desc->addr_lo = ((u64)mapping & 0xffffffff);
2974
2975         return skb_size;
2976 }
2977
2978 /* We only need to move over in the address because the other
2979  * members of the RX descriptor are invariant.  See notes above
2980  * tg3_alloc_rx_skb for full details.
2981  */
2982 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2983                            int src_idx, u32 dest_idx_unmasked)
2984 {
2985         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2986         struct ring_info *src_map, *dest_map;
2987         int dest_idx;
2988
2989         switch (opaque_key) {
2990         case RXD_OPAQUE_RING_STD:
2991                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2992                 dest_desc = &tp->rx_std[dest_idx];
2993                 dest_map = &tp->rx_std_buffers[dest_idx];
2994                 src_desc = &tp->rx_std[src_idx];
2995                 src_map = &tp->rx_std_buffers[src_idx];
2996                 break;
2997
2998         case RXD_OPAQUE_RING_JUMBO:
2999                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3000                 dest_desc = &tp->rx_jumbo[dest_idx];
3001                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3002                 src_desc = &tp->rx_jumbo[src_idx];
3003                 src_map = &tp->rx_jumbo_buffers[src_idx];
3004                 break;
3005
3006         default:
3007                 return;
3008         };
3009
3010         dest_map->skb = src_map->skb;
3011         pci_unmap_addr_set(dest_map, mapping,
3012                            pci_unmap_addr(src_map, mapping));
3013         dest_desc->addr_hi = src_desc->addr_hi;
3014         dest_desc->addr_lo = src_desc->addr_lo;
3015
3016         src_map->skb = NULL;
3017 }
3018
3019 #if TG3_VLAN_TAG_USED
3020 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3021 {
3022         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3023 }
3024 #endif
3025
3026 /* The RX ring scheme is composed of multiple rings which post fresh
3027  * buffers to the chip, and one special ring the chip uses to report
3028  * status back to the host.
3029  *
3030  * The special ring reports the status of received packets to the
3031  * host.  The chip does not write into the original descriptor the
3032  * RX buffer was obtained from.  The chip simply takes the original
3033  * descriptor as provided by the host, updates the status and length
3034  * field, then writes this into the next status ring entry.
3035  *
3036  * Each ring the host uses to post buffers to the chip is described
3037  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3038  * it is first placed into the on-chip RAM.  When the packet's length
3039  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
3040  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3041  * whose MAXLEN covers the new packet's length is chosen.
3042  *
3043  * The "separate ring for rx status" scheme may sound queer, but it makes
3044  * sense from a cache coherency perspective.  If only the host writes
3045  * to the buffer post rings, and only the chip writes to the rx status
3046  * rings, then cache lines never move beyond shared-modified state.
3047  * If both the host and chip were to write into the same ring, cache line
3048  * eviction could occur since both entities want it in an exclusive state.
3049  */
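/* In outline, tg3_rx() below walks the return ring from tp->rx_rcb_ptr up to
 * the hardware's idx[0].rx_producer, recycles or replaces each buffer, and
 * then tells the chip how far it got: the return-ring consumer index goes to
 * MAILBOX_RCVRET_CON_IDX_0 and the refreshed std/jumbo producer indices go
 * to their respective mailboxes.
 */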
3050 static int tg3_rx(struct tg3 *tp, int budget)
3051 {
3052         u32 work_mask;
3053         u32 sw_idx = tp->rx_rcb_ptr;
3054         u16 hw_idx;
3055         int received;
3056
3057         hw_idx = tp->hw_status->idx[0].rx_producer;
3058         /*
3059          * We need to order the read of hw_idx and the read of
3060          * the opaque cookie.
3061          */
3062         rmb();
3063         work_mask = 0;
3064         received = 0;
3065         while (sw_idx != hw_idx && budget > 0) {
3066                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3067                 unsigned int len;
3068                 struct sk_buff *skb;
3069                 dma_addr_t dma_addr;
3070                 u32 opaque_key, desc_idx, *post_ptr;
3071
3072                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3073                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3074                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3075                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3076                                                   mapping);
3077                         skb = tp->rx_std_buffers[desc_idx].skb;
3078                         post_ptr = &tp->rx_std_ptr;
3079                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3080                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3081                                                   mapping);
3082                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3083                         post_ptr = &tp->rx_jumbo_ptr;
3084                 }
3085                 else {
3086                         goto next_pkt_nopost;
3087                 }
3088
3089                 work_mask |= opaque_key;
3090
3091                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3092                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3093                 drop_it:
3094                         tg3_recycle_rx(tp, opaque_key,
3095                                        desc_idx, *post_ptr);
3096                 drop_it_no_recycle:
3097                         /* Other statistics kept track of by card. */
3098                         tp->net_stats.rx_dropped++;
3099                         goto next_pkt;
3100                 }
3101
3102                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3103
3104                 /* rx_offset != 2 iff this is a 5701 card running
3105                  * in PCI-X mode [see tg3_get_invariants()]
3106                  */
3107                 if (len > RX_COPY_THRESHOLD &&
3108                     tp->rx_offset == 2) {
3109                         int skb_size;
3110
3111                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3112                                                     desc_idx, *post_ptr);
3113                         if (skb_size < 0)
3114                                 goto drop_it;
3115
3116                         pci_unmap_single(tp->pdev, dma_addr,
3117                                          skb_size - tp->rx_offset,
3118                                          PCI_DMA_FROMDEVICE);
3119
3120                         skb_put(skb, len);
3121                 } else {
3122                         struct sk_buff *copy_skb;
3123
3124                         tg3_recycle_rx(tp, opaque_key,
3125                                        desc_idx, *post_ptr);
3126
3127                         copy_skb = dev_alloc_skb(len + 2);
3128                         if (copy_skb == NULL)
3129                                 goto drop_it_no_recycle;
3130
3131                         copy_skb->dev = tp->dev;
3132                         skb_reserve(copy_skb, 2);
3133                         skb_put(copy_skb, len);
3134                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3135                         memcpy(copy_skb->data, skb->data, len);
3136                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3137
3138                         /* We'll reuse the original ring buffer. */
3139                         skb = copy_skb;
3140                 }
3141
3142                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3143                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3144                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3145                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3146                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3147                 else
3148                         skb->ip_summed = CHECKSUM_NONE;
3149
3150                 skb->protocol = eth_type_trans(skb, tp->dev);
3151 #if TG3_VLAN_TAG_USED
3152                 if (tp->vlgrp != NULL &&
3153                     desc->type_flags & RXD_FLAG_VLAN) {
3154                         tg3_vlan_rx(tp, skb,
3155                                     desc->err_vlan & RXD_VLAN_MASK);
3156                 } else
3157 #endif
3158                         netif_receive_skb(skb);
3159
3160                 tp->dev->last_rx = jiffies;
3161                 received++;
3162                 budget--;
3163
3164 next_pkt:
3165                 (*post_ptr)++;
3166 next_pkt_nopost:
3167                 sw_idx++;
3168                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3169
3170                 /* Refresh hw_idx to see if there is new work */
3171                 if (sw_idx == hw_idx) {
3172                         hw_idx = tp->hw_status->idx[0].rx_producer;
3173                         rmb();
3174                 }
3175         }
3176
3177         /* ACK the status ring. */
3178         tp->rx_rcb_ptr = sw_idx;
3179         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3180
3181         /* Refill RX ring(s). */
3182         if (work_mask & RXD_OPAQUE_RING_STD) {
3183                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3184                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3185                              sw_idx);
3186         }
3187         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3188                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3189                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3190                              sw_idx);
3191         }
3192         mmiowb();
3193
3194         return received;
3195 }
3196
3197 static int tg3_poll(struct net_device *netdev, int *budget)
3198 {
3199         struct tg3 *tp = netdev_priv(netdev);
3200         struct tg3_hw_status *sblk = tp->hw_status;
3201         int done;
3202
3203         /* handle link change and other phy events */
3204         if (!(tp->tg3_flags &
3205               (TG3_FLAG_USE_LINKCHG_REG |
3206                TG3_FLAG_POLL_SERDES))) {
3207                 if (sblk->status & SD_STATUS_LINK_CHG) {
3208                         sblk->status = SD_STATUS_UPDATED |
3209                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3210                         spin_lock(&tp->lock);
3211                         tg3_setup_phy(tp, 0);
3212                         spin_unlock(&tp->lock);
3213                 }
3214         }
3215
3216         /* run TX completion thread */
3217         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3218                 tg3_tx(tp);
3219         }
3220
3221         /* run RX thread, within the bounds set by NAPI.
3222          * All RX "locking" is done by ensuring outside
3223          * code synchronizes with dev->poll()
3224          */
3225         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3226                 int orig_budget = *budget;
3227                 int work_done;
3228
3229                 if (orig_budget > netdev->quota)
3230                         orig_budget = netdev->quota;
3231
3232                 work_done = tg3_rx(tp, orig_budget);
3233
3234                 *budget -= work_done;
3235                 netdev->quota -= work_done;
3236         }
3237
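        /* In tagged-status mode, remember which status block we just
         * processed so tg3_restart_ints() can tell the chip how far we got.
         */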
3238         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3239                 tp->last_tag = sblk->status_tag;
3240                 rmb();
3241         } else
3242                 sblk->status &= ~SD_STATUS_UPDATED;
3243
3244         /* if no more work, tell net stack and NIC we're done */
3245         done = !tg3_has_work(tp);
3246         if (done) {
3247                 netif_rx_complete(netdev);
3248                 tg3_restart_ints(tp);
3249         }
3250
3251         return (done ? 0 : 1);
3252 }
3253
3254 static void tg3_irq_quiesce(struct tg3 *tp)
3255 {
3256         BUG_ON(tp->irq_sync);
3257
3258         tp->irq_sync = 1;
3259         smp_mb();
3260
3261         synchronize_irq(tp->pdev->irq);
3262 }
3263
3264 static inline int tg3_irq_sync(struct tg3 *tp)
3265 {
3266         return tp->irq_sync;
3267 }
3268
3269 /* Fully shut down all tg3 driver activity elsewhere in the system.
3270  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3271  * with as well.  Most of the time, this is not necessary except when
3272  * shutting down the device.
3273  */
3274 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3275 {
3276         if (irq_sync)
3277                 tg3_irq_quiesce(tp);
3278         spin_lock_bh(&tp->lock);
3279         spin_lock(&tp->tx_lock);
3280 }
3281
3282 static inline void tg3_full_unlock(struct tg3 *tp)
3283 {
3284         spin_unlock(&tp->tx_lock);
3285         spin_unlock_bh(&tp->lock);
3286 }
3287
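/* Typical usage, as in tg3_reset_task() and tg3_change_mtu() below:
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);
 *      ... reconfigure the hardware ...
 *      tg3_netif_start(tp);
 *      tg3_full_unlock(tp);
 */
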
3288 /* MSI ISR - No need to check for interrupt sharing and no need to
3289  * flush status block and interrupt mailbox. PCI ordering rules
3290  * guarantee that MSI will arrive after the status block.
3291  */
3292 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3293 {
3294         struct net_device *dev = dev_id;
3295         struct tg3 *tp = netdev_priv(dev);
3296
3297         prefetch(tp->hw_status);
3298         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3299         /*
3300          * Writing any value to intr-mbox-0 clears PCI INTA# and
3301          * chip-internal interrupt pending events.
3302          * Writing non-zero to intr-mbox-0 additionally tells the
3303          * NIC to stop sending us irqs, engaging "in-intr-handler"
3304          * event coalescing.
3305          */
3306         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3307         if (likely(!tg3_irq_sync(tp)))
3308                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3309
3310         return IRQ_RETVAL(1);
3311 }
3312
3313 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3314 {
3315         struct net_device *dev = dev_id;
3316         struct tg3 *tp = netdev_priv(dev);
3317         struct tg3_hw_status *sblk = tp->hw_status;
3318         unsigned int handled = 1;
3319
3320         /* In INTx mode, it is possible for the interrupt to arrive at
3321          * the CPU before the status block posted ahead of it has landed.
3322          * Reading the PCI State register will confirm whether the
3323          * interrupt is ours and will flush the status block.
3324          */
3325         if ((sblk->status & SD_STATUS_UPDATED) ||
3326             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3327                 /*
3328                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3329                  * chip-internal interrupt pending events.
3330                  * Writing non-zero to intr-mbox-0 additionally tells the
3331                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3332                  * event coalescing.
3333                  */
3334                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3335                              0x00000001);
3336                 if (tg3_irq_sync(tp))
3337                         goto out;
3338                 sblk->status &= ~SD_STATUS_UPDATED;
3339                 if (likely(tg3_has_work(tp))) {
3340                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3341                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3342                 } else {
3343                         /* No work, shared interrupt perhaps?  re-enable
3344                          * interrupts, and flush that PCI write
3345                          */
3346                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3347                                 0x00000000);
3348                 }
3349         } else {        /* shared interrupt */
3350                 handled = 0;
3351         }
3352 out:
3353         return IRQ_RETVAL(handled);
3354 }
3355
3356 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3357 {
3358         struct net_device *dev = dev_id;
3359         struct tg3 *tp = netdev_priv(dev);
3360         struct tg3_hw_status *sblk = tp->hw_status;
3361         unsigned int handled = 1;
3362
3363         /* In INTx mode, it is possible for the interrupt to arrive at
3364          * the CPU before the status block posted ahead of it has landed.
3365          * Reading the PCI State register will confirm whether the
3366          * interrupt is ours and will flush the status block.
3367          */
3368         if ((sblk->status_tag != tp->last_tag) ||
3369             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3370                 /*
3371                  * writing any value to intr-mbox-0 clears PCI INTA# and
3372                  * chip-internal interrupt pending events.
3373                  * writing non-zero to intr-mbox-0 additionally tells the
3374                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3375                  * event coalescing.
3376                  */
3377                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3378                              0x00000001);
3379                 if (tg3_irq_sync(tp))
3380                         goto out;
3381                 if (netif_rx_schedule_prep(dev)) {
3382                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3383                         /* Update last_tag to mark that this status has been
3384                          * seen. Because interrupt may be shared, we may be
3385                          * racing with tg3_poll(), so only update last_tag
3386                          * if tg3_poll() is not scheduled.
3387                          */
3388                         tp->last_tag = sblk->status_tag;
3389                         __netif_rx_schedule(dev);
3390                 }
3391         } else {        /* shared interrupt */
3392                 handled = 0;
3393         }
3394 out:
3395         return IRQ_RETVAL(handled);
3396 }
3397
3398 /* ISR for interrupt test */
3399 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3400                 struct pt_regs *regs)
3401 {
3402         struct net_device *dev = dev_id;
3403         struct tg3 *tp = netdev_priv(dev);
3404         struct tg3_hw_status *sblk = tp->hw_status;
3405
3406         if ((sblk->status & SD_STATUS_UPDATED) ||
3407             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3408                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3409                              0x00000001);
3410                 return IRQ_RETVAL(1);
3411         }
3412         return IRQ_RETVAL(0);
3413 }
3414
3415 static int tg3_init_hw(struct tg3 *);
3416 static int tg3_halt(struct tg3 *, int, int);
3417
3418 #ifdef CONFIG_NET_POLL_CONTROLLER
3419 static void tg3_poll_controller(struct net_device *dev)
3420 {
3421         struct tg3 *tp = netdev_priv(dev);
3422
3423         tg3_interrupt(tp->pdev->irq, dev, NULL);
3424 }
3425 #endif
3426
3427 static void tg3_reset_task(void *_data)
3428 {
3429         struct tg3 *tp = _data;
3430         unsigned int restart_timer;
3431
3432         tg3_netif_stop(tp);
3433
3434         tg3_full_lock(tp, 1);
3435
3436         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3437         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3438
3439         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3440         tg3_init_hw(tp);
3441
3442         tg3_netif_start(tp);
3443
3444         tg3_full_unlock(tp);
3445
3446         if (restart_timer)
3447                 mod_timer(&tp->timer, jiffies + 1);
3448 }
3449
3450 static void tg3_tx_timeout(struct net_device *dev)
3451 {
3452         struct tg3 *tp = netdev_priv(dev);
3453
3454         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3455                dev->name);
3456
3457         schedule_work(&tp->reset_task);
3458 }
3459
3460 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
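/* The second term relies on 32-bit wraparound: base is the low 32 bits of the
 * mapping, so base + len + 8 wraps below base exactly when the buffer (plus a
 * few bytes of slack) straddles a 4GB boundary.  E.g. base = 0xffffff00 and
 * len = 0x200 gives base + len + 8 = 0x108 after truncation, which is < base.
 * The base > 0xffffdcc0 term is just a cheap pre-filter, presumably sized to
 * the largest buffer this driver ever maps (~9KB).
 */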
3461 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3462 {
3463         u32 base = (u32) mapping & 0xffffffff;
3464
3465         return ((base > 0xffffdcc0) &&
3466                 (base + len + 8 < base));
3467 }
3468
3469 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3470
3471 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3472                                        u32 last_plus_one, u32 *start,
3473                                        u32 base_flags, u32 mss)
3474 {
3475         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3476         dma_addr_t new_addr = 0;
3477         u32 entry = *start;
3478         int i, ret = 0;
3479
3480         if (!new_skb) {
3481                 ret = -1;
3482         } else {
3483                 /* New SKB is guaranteed to be linear. */
3484                 entry = *start;
3485                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3486                                           PCI_DMA_TODEVICE);
3487                 /* Make sure new skb does not cross any 4G boundaries.
3488                  * Drop the packet if it does.
3489                  */
3490                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3491                         ret = -1;
3492                         dev_kfree_skb(new_skb);
3493                         new_skb = NULL;
3494                 } else {
3495                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3496                                     base_flags, 1 | (mss << 1));
3497                         *start = NEXT_TX(entry);
3498                 }
3499         }
3500
3501         /* Now clean up the sw ring entries. */
3502         i = 0;
3503         while (entry != last_plus_one) {
3504                 int len;
3505
3506                 if (i == 0)
3507                         len = skb_headlen(skb);
3508                 else
3509                         len = skb_shinfo(skb)->frags[i-1].size;
3510                 pci_unmap_single(tp->pdev,
3511                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3512                                  len, PCI_DMA_TODEVICE);
3513                 if (i == 0) {
3514                         tp->tx_buffers[entry].skb = new_skb;
3515                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3516                 } else {
3517                         tp->tx_buffers[entry].skb = NULL;
3518                 }
3519                 entry = NEXT_TX(entry);
3520                 i++;
3521         }
3522
3523         dev_kfree_skb(skb);
3524
3525         return ret;
3526 }
3527
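/* Fill in one TX descriptor.  mss_and_is_end packs two values: bit 0 is set
 * on the last descriptor of a frame (it becomes TXD_FLAG_END), and bits 31:1
 * carry the MSS for TSO frames (zero otherwise).
 */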
3528 static void tg3_set_txd(struct tg3 *tp, int entry,
3529                         dma_addr_t mapping, int len, u32 flags,
3530                         u32 mss_and_is_end)
3531 {
3532         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3533         int is_end = (mss_and_is_end & 0x1);
3534         u32 mss = (mss_and_is_end >> 1);
3535         u32 vlan_tag = 0;
3536
3537         if (is_end)
3538                 flags |= TXD_FLAG_END;
3539         if (flags & TXD_FLAG_VLAN) {
3540                 vlan_tag = flags >> 16;
3541                 flags &= 0xffff;
3542         }
3543         vlan_tag |= (mss << TXD_MSS_SHIFT);
3544
3545         txd->addr_hi = ((u64) mapping >> 32);
3546         txd->addr_lo = ((u64) mapping & 0xffffffff);
3547         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3548         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3549 }
3550
3551 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3552 {
3553         struct tg3 *tp = netdev_priv(dev);
3554         dma_addr_t mapping;
3555         u32 len, entry, base_flags, mss;
3556         int would_hit_hwbug;
3557
3558         len = skb_headlen(skb);
3559
3560         /* No BH disabling for tx_lock here.  We are running in BH disabled
3561          * context and TX reclaim runs via tp->poll inside of a software
3562          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3563          * no IRQ context deadlocks to worry about either.  Rejoice!
3564          */
3565         if (!spin_trylock(&tp->tx_lock))
3566                 return NETDEV_TX_LOCKED; 
3567
3568         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3569                 if (!netif_queue_stopped(dev)) {
3570                         netif_stop_queue(dev);
3571
3572                         /* This is a hard error, log it. */
3573                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3574                                "queue awake!\n", dev->name);
3575                 }
3576                 spin_unlock(&tp->tx_lock);
3577                 return NETDEV_TX_BUSY;
3578         }
3579
3580         entry = tp->tx_prod;
3581         base_flags = 0;
3582         if (skb->ip_summed == CHECKSUM_HW)
3583                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3584 #if TG3_TSO_SUPPORT != 0
3585         mss = 0;
3586         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3587             (mss = skb_shinfo(skb)->tso_size) != 0) {
3588                 int tcp_opt_len, ip_tcp_len;
3589
3590                 if (skb_header_cloned(skb) &&
3591                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3592                         dev_kfree_skb(skb);
3593                         goto out_unlock;
3594                 }
3595
3596                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3597                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3598
3599                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3600                                TXD_FLAG_CPU_POST_DMA);
3601
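                /* Prime the headers for segmentation: clear the IP checksum,
                 * set tot_len to the length of a single segment and, unless
                 * the chip does full TSO in hardware, seed the TCP checksum
                 * with a pseudo-header sum that leaves out the length, which
                 * is accounted for per segment.
                 */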
3602                 skb->nh.iph->check = 0;
3603                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3604                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3605                         skb->h.th->check = 0;
3606                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3607                 }
3608                 else {
3609                         skb->h.th->check =
3610                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3611                                                    skb->nh.iph->daddr,
3612                                                    0, IPPROTO_TCP, 0);
3613                 }
3614
3615                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3616                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3617                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3618                                 int tsflags;
3619
3620                                 tsflags = ((skb->nh.iph->ihl - 5) +
3621                                            (tcp_opt_len >> 2));
3622                                 mss |= (tsflags << 11);
3623                         }
3624                 } else {
3625                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3626                                 int tsflags;
3627
3628                                 tsflags = ((skb->nh.iph->ihl - 5) +
3629                                            (tcp_opt_len >> 2));
3630                                 base_flags |= tsflags << 12;
3631                         }
3632                 }
3633         }
3634 #else
3635         mss = 0;
3636 #endif
3637 #if TG3_VLAN_TAG_USED
3638         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3639                 base_flags |= (TXD_FLAG_VLAN |
3640                                (vlan_tx_tag_get(skb) << 16));
3641 #endif
3642
3643         /* Queue skb data, a.k.a. the main skb fragment. */
3644         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3645
3646         tp->tx_buffers[entry].skb = skb;
3647         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3648
3649         would_hit_hwbug = 0;
3650
3651         if (tg3_4g_overflow_test(mapping, len))
3652                 would_hit_hwbug = 1;
3653
3654         tg3_set_txd(tp, entry, mapping, len, base_flags,
3655                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3656
3657         entry = NEXT_TX(entry);
3658
3659         /* Now loop through additional data fragments, and queue them. */
3660         if (skb_shinfo(skb)->nr_frags > 0) {
3661                 unsigned int i, last;
3662
3663                 last = skb_shinfo(skb)->nr_frags - 1;
3664                 for (i = 0; i <= last; i++) {
3665                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3666
3667                         len = frag->size;
3668                         mapping = pci_map_page(tp->pdev,
3669                                                frag->page,
3670                                                frag->page_offset,
3671                                                len, PCI_DMA_TODEVICE);
3672
3673                         tp->tx_buffers[entry].skb = NULL;
3674                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3675
3676                         if (tg3_4g_overflow_test(mapping, len))
3677                                 would_hit_hwbug = 1;
3678
3679                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3680                                 tg3_set_txd(tp, entry, mapping, len,
3681                                             base_flags, (i == last)|(mss << 1));
3682                         else
3683                                 tg3_set_txd(tp, entry, mapping, len,
3684                                             base_flags, (i == last));
3685
3686                         entry = NEXT_TX(entry);
3687                 }
3688         }
3689
3690         if (would_hit_hwbug) {
3691                 u32 last_plus_one = entry;
3692                 u32 start;
3693
3694                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3695                 start &= (TG3_TX_RING_SIZE - 1);
3696
3697                 /* If the workaround fails due to memory/mapping
3698                  * failure, silently drop this packet.
3699                  */
3700                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3701                                                 &start, base_flags, mss))
3702                         goto out_unlock;
3703
3704                 entry = start;
3705         }
3706
3707         /* Packets are ready, update Tx producer idx locally and on the card. */
3708         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3709
3710         tp->tx_prod = entry;
3711         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3712                 netif_stop_queue(dev);
3713                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3714                         netif_wake_queue(tp->dev);
3715         }
3716
3717 out_unlock:
3718         mmiowb();
3719         spin_unlock(&tp->tx_lock);
3720
3721         dev->trans_start = jiffies;
3722
3723         return NETDEV_TX_OK;
3724 }
3725
3726 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3727                                int new_mtu)
3728 {
3729         dev->mtu = new_mtu;
3730
3731         if (new_mtu > ETH_DATA_LEN) {
3732                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3733                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3734                         ethtool_op_set_tso(dev, 0);
3735                 }
3736                 else
3737                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3738         } else {
3739                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3740                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3741                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3742         }
3743 }
3744
3745 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3746 {
3747         struct tg3 *tp = netdev_priv(dev);
3748
3749         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3750                 return -EINVAL;
3751
3752         if (!netif_running(dev)) {
3753                 /* We'll just catch it later when the
3754                  * device is up'd.
3755                  */
3756                 tg3_set_mtu(dev, tp, new_mtu);
3757                 return 0;
3758         }
3759
3760         tg3_netif_stop(tp);
3761
3762         tg3_full_lock(tp, 1);
3763
3764         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3765
3766         tg3_set_mtu(dev, tp, new_mtu);
3767
3768         tg3_init_hw(tp);
3769
3770         tg3_netif_start(tp);
3771
3772         tg3_full_unlock(tp);
3773
3774         return 0;
3775 }
3776
3777 /* Free up pending packets in all rx/tx rings.
3778  *
3779  * The chip has been shut down and the driver detached from
3780  * the network stack, so no interrupts or new tx packets will
3781  * end up in the driver.  tp->{tx,}lock is not held and we are not
3782  * in an interrupt context and thus may sleep.
3783  */
3784 static void tg3_free_rings(struct tg3 *tp)
3785 {
3786         struct ring_info *rxp;
3787         int i;
3788
3789         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3790                 rxp = &tp->rx_std_buffers[i];
3791
3792                 if (rxp->skb == NULL)
3793                         continue;
3794                 pci_unmap_single(tp->pdev,
3795                                  pci_unmap_addr(rxp, mapping),
3796                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3797                                  PCI_DMA_FROMDEVICE);
3798                 dev_kfree_skb_any(rxp->skb);
3799                 rxp->skb = NULL;
3800         }
3801
3802         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3803                 rxp = &tp->rx_jumbo_buffers[i];
3804
3805                 if (rxp->skb == NULL)
3806                         continue;
3807                 pci_unmap_single(tp->pdev,
3808                                  pci_unmap_addr(rxp, mapping),
3809                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3810                                  PCI_DMA_FROMDEVICE);
3811                 dev_kfree_skb_any(rxp->skb);
3812                 rxp->skb = NULL;
3813         }
3814
3815         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3816                 struct tx_ring_info *txp;
3817                 struct sk_buff *skb;
3818                 int j;
3819
3820                 txp = &tp->tx_buffers[i];
3821                 skb = txp->skb;
3822
3823                 if (skb == NULL) {
3824                         i++;
3825                         continue;
3826                 }
3827
3828                 pci_unmap_single(tp->pdev,
3829                                  pci_unmap_addr(txp, mapping),
3830                                  skb_headlen(skb),
3831                                  PCI_DMA_TODEVICE);
3832                 txp->skb = NULL;
3833
3834                 i++;
3835
3836                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3837                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3838                         pci_unmap_page(tp->pdev,
3839                                        pci_unmap_addr(txp, mapping),
3840                                        skb_shinfo(skb)->frags[j].size,
3841                                        PCI_DMA_TODEVICE);
3842                         i++;
3843                 }
3844
3845                 dev_kfree_skb_any(skb);
3846         }
3847 }
3848
3849 /* Initialize tx/rx rings for packet processing.
3850  *
3851  * The chip has been shut down and the driver detached from
3852  * the network stack, so no interrupts or new tx packets will
3853  * end up in the driver.  tp->{tx,}lock are held and thus
3854  * we may not sleep.
3855  */
3856 static void tg3_init_rings(struct tg3 *tp)
3857 {
3858         u32 i;
3859
3860         /* Free up all the SKBs. */
3861         tg3_free_rings(tp);
3862
3863         /* Zero out all descriptors. */
3864         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3865         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3866         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3867         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3868
3869         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3870         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3871             (tp->dev->mtu > ETH_DATA_LEN))
3872                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3873
3874         /* Initialize invariants of the rings; we only set this
3875          * stuff once.  This works because the card does not
3876          * write into the rx buffer posting rings.
3877          */
3878         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3879                 struct tg3_rx_buffer_desc *rxd;
3880
3881                 rxd = &tp->rx_std[i];
3882                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3883                         << RXD_LEN_SHIFT;
3884                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3885                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3886                                (i << RXD_OPAQUE_INDEX_SHIFT));
3887         }
3888
3889         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3890                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3891                         struct tg3_rx_buffer_desc *rxd;
3892
3893                         rxd = &tp->rx_jumbo[i];
3894                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3895                                 << RXD_LEN_SHIFT;
3896                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3897                                 RXD_FLAG_JUMBO;
3898                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3899                                (i << RXD_OPAQUE_INDEX_SHIFT));
3900                 }
3901         }
3902
3903         /* Now allocate fresh SKBs for each rx ring. */
3904         for (i = 0; i < tp->rx_pending; i++) {
3905                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3906                                      -1, i) < 0)
3907                         break;
3908         }
3909
3910         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3911                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3912                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3913                                              -1, i) < 0)
3914                                 break;
3915                 }
3916         }
3917 }
3918
3919 /*
3920  * Must not be invoked with interrupt sources disabled and
3921  * the hardware shut down.
3922  */
3923 static void tg3_free_consistent(struct tg3 *tp)
3924 {
3925         kfree(tp->rx_std_buffers);
3926         tp->rx_std_buffers = NULL;
3927         if (tp->rx_std) {
3928                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3929                                     tp->rx_std, tp->rx_std_mapping);
3930                 tp->rx_std = NULL;
3931         }
3932         if (tp->rx_jumbo) {
3933                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3934                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3935                 tp->rx_jumbo = NULL;
3936         }
3937         if (tp->rx_rcb) {
3938                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3939                                     tp->rx_rcb, tp->rx_rcb_mapping);
3940                 tp->rx_rcb = NULL;
3941         }
3942         if (tp->tx_ring) {
3943                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3944                         tp->tx_ring, tp->tx_desc_mapping);
3945                 tp->tx_ring = NULL;
3946         }
3947         if (tp->hw_status) {
3948                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3949                                     tp->hw_status, tp->status_mapping);
3950                 tp->hw_status = NULL;
3951         }
3952         if (tp->hw_stats) {
3953                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3954                                     tp->hw_stats, tp->stats_mapping);
3955                 tp->hw_stats = NULL;
3956         }
3957 }
3958
3959 /*
3960  * Must not be invoked with interrupt sources disabled and
3961  * the hardware shut down.  Can sleep.
3962  */
3963 static int tg3_alloc_consistent(struct tg3 *tp)
3964 {
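        /* One kmalloc() backs the std and jumbo ring_info arrays and the
         * tx_ring_info array; rx_jumbo_buffers and tx_buffers below simply
         * point into it.
         */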
3965         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3966                                       (TG3_RX_RING_SIZE +
3967                                        TG3_RX_JUMBO_RING_SIZE)) +
3968                                      (sizeof(struct tx_ring_info) *
3969                                       TG3_TX_RING_SIZE),
3970                                      GFP_KERNEL);
3971         if (!tp->rx_std_buffers)
3972                 return -ENOMEM;
3973
3974         memset(tp->rx_std_buffers, 0,
3975                (sizeof(struct ring_info) *
3976                 (TG3_RX_RING_SIZE +
3977                  TG3_RX_JUMBO_RING_SIZE)) +
3978                (sizeof(struct tx_ring_info) *
3979                 TG3_TX_RING_SIZE));
3980
3981         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3982         tp->tx_buffers = (struct tx_ring_info *)
3983                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3984
3985         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3986                                           &tp->rx_std_mapping);
3987         if (!tp->rx_std)
3988                 goto err_out;
3989
3990         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3991                                             &tp->rx_jumbo_mapping);
3992
3993         if (!tp->rx_jumbo)
3994                 goto err_out;
3995
3996         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3997                                           &tp->rx_rcb_mapping);
3998         if (!tp->rx_rcb)
3999                 goto err_out;
4000
4001         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4002                                            &tp->tx_desc_mapping);
4003         if (!tp->tx_ring)
4004                 goto err_out;
4005
4006         tp->hw_status = pci_alloc_consistent(tp->pdev,
4007                                              TG3_HW_STATUS_SIZE,
4008                                              &tp->status_mapping);
4009         if (!tp->hw_status)
4010                 goto err_out;
4011
4012         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4013                                             sizeof(struct tg3_hw_stats),
4014                                             &tp->stats_mapping);
4015         if (!tp->hw_stats)
4016                 goto err_out;
4017
4018         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4019         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4020
4021         return 0;
4022
4023 err_out:
4024         tg3_free_consistent(tp);
4025         return -ENOMEM;
4026 }
4027
4028 #define MAX_WAIT_CNT 1000
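/* Each poll below waits 100us, so MAX_WAIT_CNT gives a block roughly 100ms
 * to report that it has stopped.
 */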
4029
4030 /* To stop a block, clear the enable bit and poll till it
4031  * clears.  tp->lock is held.
4032  */
4033 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4034 {
4035         unsigned int i;
4036         u32 val;
4037
4038         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4039                 switch (ofs) {
4040                 case RCVLSC_MODE:
4041                 case DMAC_MODE:
4042                 case MBFREE_MODE:
4043                 case BUFMGR_MODE:
4044                 case MEMARB_MODE:
4045                         /* We can't enable/disable these bits of the
4046                          * 5705/5750; just say success.
4047                          */
4048                         return 0;
4049
4050                 default:
4051                         break;
4052                 };
4053         }
4054
4055         val = tr32(ofs);
4056         val &= ~enable_bit;
4057         tw32_f(ofs, val);
4058
4059         for (i = 0; i < MAX_WAIT_CNT; i++) {
4060                 udelay(100);
4061                 val = tr32(ofs);
4062                 if ((val & enable_bit) == 0)
4063                         break;
4064         }
4065
4066         if (i == MAX_WAIT_CNT && !silent) {
4067                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4068                        "ofs=%lx enable_bit=%x\n",
4069                        ofs, enable_bit);
4070                 return -ENODEV;
4071         }
4072
4073         return 0;
4074 }
4075
4076 /* tp->lock is held. */
4077 static int tg3_abort_hw(struct tg3 *tp, int silent)
4078 {
4079         int i, err;
4080
4081         tg3_disable_ints(tp);
4082
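        /* Quiesce the chip block by block: the receive path first, then the
         * send and DMA engines, and finally host coalescing, the buffer
         * manager and the memory arbiter.
         */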
4083         tp->rx_mode &= ~RX_MODE_ENABLE;
4084         tw32_f(MAC_RX_MODE, tp->rx_mode);
4085         udelay(10);
4086
4087         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4088         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4089         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4090         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4091         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4092         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4093
4094         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4095         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4096         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4097         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4098         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4099         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4100         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4101
4102         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4103         tw32_f(MAC_MODE, tp->mac_mode);
4104         udelay(40);
4105
4106         tp->tx_mode &= ~TX_MODE_ENABLE;
4107         tw32_f(MAC_TX_MODE, tp->tx_mode);
4108
4109         for (i = 0; i < MAX_WAIT_CNT; i++) {
4110                 udelay(100);
4111                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4112                         break;
4113         }
4114         if (i >= MAX_WAIT_CNT) {
4115                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4116                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4117                        tp->dev->name, tr32(MAC_TX_MODE));
4118                 err |= -ENODEV;
4119         }
4120
4121         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4122         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4123         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4124
4125         tw32(FTQ_RESET, 0xffffffff);
4126         tw32(FTQ_RESET, 0x00000000);
4127
4128         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4129         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4130
4131         if (tp->hw_status)
4132                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4133         if (tp->hw_stats)
4134                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4135
4136         return err;
4137 }
4138
4139 /* tp->lock is held. */
4140 static int tg3_nvram_lock(struct tg3 *tp)
4141 {
4142         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4143                 int i;
4144
4145                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4146                 for (i = 0; i < 8000; i++) {
4147                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4148                                 break;
4149                         udelay(20);
4150                 }
4151                 if (i == 8000)
4152                         return -ENODEV;
4153         }
4154         return 0;
4155 }
4156
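/* Release the NVRAM arbitration grant taken by tg3_nvram_lock().
 * Callers bracket NVRAM access with the pair, for example:
 *
 *	tg3_nvram_lock(tp);
 *	err = tg3_halt_cpu(tp, cpu_base);
 *	tg3_nvram_unlock(tp);
 */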
4157 /* tp->lock is held. */
4158 static void tg3_nvram_unlock(struct tg3 *tp)
4159 {
4160         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4161                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4162 }
4163
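/* On 5750 and later chips without protected NVRAM, set ACCESS_ENABLE
 * so the driver can drive the NVRAM interface directly.
 */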
4164 /* tp->lock is held. */
4165 static void tg3_enable_nvram_access(struct tg3 *tp)
4166 {
4167         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4168             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4169                 u32 nvaccess = tr32(NVRAM_ACCESS);
4170
4171                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4172         }
4173 }
4174
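/* Counterpart of tg3_enable_nvram_access(): clear ACCESS_ENABLE again
 * on 5750 and later chips without protected NVRAM.
 */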
4175 /* tp->lock is held. */
4176 static void tg3_disable_nvram_access(struct tg3 *tp)
4177 {
4178         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4179             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4180                 u32 nvaccess = tr32(NVRAM_ACCESS);
4181
4182                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4183         }
4184 }
4185
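/* Signal the on-chip firmware that a reset is about to happen: post
 * MAGIC1 in the firmware mailbox (on non-Sun boards) and, if the
 * new-style ASF handshake is in use, record the driver state
 * (start/unload/suspend) matching the kind of reset.
 */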
4186 /* tp->lock is held. */
4187 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4188 {
4189         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4190                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4191                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4192
4193         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4194                 switch (kind) {
4195                 case RESET_KIND_INIT:
4196                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4197                                       DRV_STATE_START);
4198                         break;
4199
4200                 case RESET_KIND_SHUTDOWN:
4201                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4202                                       DRV_STATE_UNLOAD);
4203                         break;
4204
4205                 case RESET_KIND_SUSPEND:
4206                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4207                                       DRV_STATE_SUSPEND);
4208                         break;
4209
4210                 default:
4211                         break;
4212                 }
4213         }
4214 }
4215
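/* Second half of the new-style ASF handshake: once the reset has
 * completed, report the corresponding START_DONE/UNLOAD_DONE state.
 */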
4216 /* tp->lock is held. */
4217 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4218 {
4219         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4220                 switch (kind) {
4221                 case RESET_KIND_INIT:
4222                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4223                                       DRV_STATE_START_DONE);
4224                         break;
4225
4226                 case RESET_KIND_SHUTDOWN:
4227                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4228                                       DRV_STATE_UNLOAD_DONE);
4229                         break;
4230
4231                 default:
4232                         break;
4233                 }
4234         }
4235 }
4236
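/* Legacy ASF handshake: when ASF is enabled but the new-style
 * handshake is not, write the driver state for this reset kind
 * directly to the firmware state mailbox.
 */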
4237 /* tp->lock is held. */
4238 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4239 {
4240         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4241                 switch (kind) {
4242                 case RESET_KIND_INIT:
4243                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4244                                       DRV_STATE_START);
4245                         break;
4246
4247                 case RESET_KIND_SHUTDOWN:
4248                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4249                                       DRV_STATE_UNLOAD);
4250                         break;
4251
4252                 case RESET_KIND_SUSPEND:
4253                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4254                                       DRV_STATE_SUSPEND);
4255                         break;
4256
4257                 default:
4258                         break;
4259                 }
4260         }
4261 }
4262
4263 static void tg3_stop_fw(struct tg3 *);
4264
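/* Perform a core clock reset and bring the chip back to a usable
 * state: temporarily switch to non-flushing register writes, issue
 * GRC_MISC_CFG_CORECLK_RESET (with PCI Express and GPHY power
 * adjustments), restore PCI config space, MSI and memory arbiter
 * settings, wait for the bootcode to post ~MAGIC1 in the firmware
 * mailbox, and finally reprobe the ASF enable state.
 */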
4265 /* tp->lock is held. */
4266 static int tg3_chip_reset(struct tg3 *tp)
4267 {
4268         u32 val;
4269         void (*write_op)(struct tg3 *, u32, u32);
4270         int i;
4271
4272         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4273                 tg3_nvram_lock(tp);
4274
4275         /*
4276          * We must avoid the readl() that normally takes place.
4277          * It can lock up machines, cause machine checks, and other
4278          * unpleasant things.  So temporarily disable the 5701
4279          * hardware workaround while we do the reset.
4280          */
4281         write_op = tp->write32;
4282         if (write_op == tg3_write_flush_reg32)
4283                 tp->write32 = tg3_write32;
4284
4285         /* do the reset */
4286         val = GRC_MISC_CFG_CORECLK_RESET;
4287
4288         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4289                 if (tr32(0x7e2c) == 0x60) {
4290                         tw32(0x7e2c, 0x20);
4291                 }
4292                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4293                         tw32(GRC_MISC_CFG, (1 << 29));
4294                         val |= (1 << 29);
4295                 }
4296         }
4297
4298         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4299                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4300         tw32(GRC_MISC_CFG, val);
4301
4302         /* restore 5701 hardware bug workaround write method */
4303         tp->write32 = write_op;
4304
4305         /* Unfortunately, we have to delay before the PCI read back.
4306          * Some 575X chips will not even respond to a PCI cfg access
4307          * when the reset command is given to the chip.
4308          *
4309          * How do these hardware designers expect things to work
4310          * properly if the PCI write is posted for a long period
4311          * of time?  It is always necessary to have some method by
4312          * which a register read back can occur to push out the
4313          * write which does the reset.
4314          *
4315          * For most tg3 variants the trick below works.
4316          * Ho hum...
4317          */
4318         udelay(120);
4319
4320         /* Flush PCI posted writes.  The normal MMIO registers
4321          * are inaccessible at this time, so this is the only
4322          * way to do this reliably (actually, this is no longer
4323          * the case, see above).  I tried to use indirect
4324          * register read/write but this upset some 5701 variants.
4325          */
4326         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4327
4328         udelay(120);
4329
4330         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4331                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4332                         int i;
4333                         u32 cfg_val;
4334
4335                         /* Wait for link training to complete.  */
4336                         for (i = 0; i < 5000; i++)
4337                                 udelay(100);
4338
4339                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4340                         pci_write_config_dword(tp->pdev, 0xc4,
4341                                                cfg_val | (1 << 15));
4342                 }
4343                 /* Set PCIE max payload size and clear error status.  */
4344                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4345         }
4346
4347         /* Re-enable indirect register accesses. */
4348         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4349                                tp->misc_host_ctrl);
4350
4351         /* Set MAX PCI retry to zero. */
4352         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4353         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4354             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4355                 val |= PCISTATE_RETRY_SAME_DMA;
4356         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4357
4358         pci_restore_state(tp->pdev);
4359
4360         /* Make sure PCI-X relaxed ordering bit is clear. */
4361         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4362         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4363         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4364
4365         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4366                 u32 val;
4367
4368                 /* A chip reset on the 5780 will also reset the MSI
4369                  * enable bit, so we need to restore it.
4370                  */
4371                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4372                         u16 ctrl;
4373
4374                         pci_read_config_word(tp->pdev,
4375                                              tp->msi_cap + PCI_MSI_FLAGS,
4376                                              &ctrl);
4377                         pci_write_config_word(tp->pdev,
4378                                               tp->msi_cap + PCI_MSI_FLAGS,
4379                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4380                         val = tr32(MSGINT_MODE);
4381                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4382                 }
4383
4384                 val = tr32(MEMARB_MODE);
4385                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4386
4387         } else
4388                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4389
4390         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4391                 tg3_stop_fw(tp);
4392                 tw32(0x5000, 0x400);
4393         }
4394
4395         tw32(GRC_MODE, tp->grc_mode);
4396
4397         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4398                 u32 val = tr32(0xc4);
4399
4400                 tw32(0xc4, val | (1 << 15));
4401         }
4402
4403         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4405                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4406                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4407                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4408                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4409         }
4410
4411         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4412                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4413                 tw32_f(MAC_MODE, tp->mac_mode);
4414         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4415                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4416                 tw32_f(MAC_MODE, tp->mac_mode);
4417         } else
4418                 tw32_f(MAC_MODE, 0);
4419         udelay(40);
4420
4421         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4422                 /* Wait for firmware initialization to complete. */
4423                 for (i = 0; i < 100000; i++) {
4424                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4425                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4426                                 break;
4427                         udelay(10);
4428                 }
4429                 if (i >= 100000) {
4430                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4431                                "firmware will not restart, magic=%08x\n",
4432                                tp->dev->name, val);
4433                         return -ENODEV;
4434                 }
4435         }
4436
4437         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4438             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4439                 u32 val = tr32(0x7c00);
4440
4441                 tw32(0x7c00, val | (1 << 25));
4442         }
4443
4444         /* Reprobe ASF enable state.  */
4445         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4446         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4447         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4448         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4449                 u32 nic_cfg;
4450
4451                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4452                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4453                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4454                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4455                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4456                 }
4457         }
4458
4459         return 0;
4460 }
4461
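/* If ASF firmware is running, ask it to pause: write
 * FWCMD_NICDRV_PAUSE_FW to the firmware command mailbox, raise the
 * RX CPU event bit, and give the RX CPU up to ~100us to acknowledge.
 */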
4462 /* tp->lock is held. */
4463 static void tg3_stop_fw(struct tg3 *tp)
4464 {
4465         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4466                 u32 val;
4467                 int i;
4468
4469                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4470                 val = tr32(GRC_RX_CPU_EVENT);
4471                 val |= (1 << 14);
4472                 tw32(GRC_RX_CPU_EVENT, val);
4473
4474                 /* Wait for RX cpu to ACK the event.  */
4475                 for (i = 0; i < 100; i++) {
4476                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4477                                 break;
4478                         udelay(1);
4479                 }
4480         }
4481 }
4482
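/* Complete controller shutdown: pause the ASF firmware, write the
 * pre-reset signatures, abort all hardware activity, reset the chip,
 * then write the legacy and post-reset signatures.
 */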
4483 /* tp->lock is held. */
4484 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4485 {
4486         int err;
4487
4488         tg3_stop_fw(tp);
4489
4490         tg3_write_sig_pre_reset(tp, kind);
4491
4492         tg3_abort_hw(tp, silent);
4493         err = tg3_chip_reset(tp);
4494
4495         tg3_write_sig_legacy(tp, kind);
4496         tg3_write_sig_post_reset(tp, kind);
4497
4498         if (err)
4499                 return err;
4500
4501         return 0;
4502 }
4503
4504 #define TG3_FW_RELEASE_MAJOR    0x0
4505 #define TG3_FW_RELASE_MINOR     0x0
4506 #define TG3_FW_RELEASE_FIX      0x0
4507 #define TG3_FW_START_ADDR       0x08000000
4508 #define TG3_FW_TEXT_ADDR        0x08000000
4509 #define TG3_FW_TEXT_LEN         0x9c0
4510 #define TG3_FW_RODATA_ADDR      0x080009c0
4511 #define TG3_FW_RODATA_LEN       0x60
4512 #define TG3_FW_DATA_ADDR        0x08000a40
4513 #define TG3_FW_DATA_LEN         0x20
4514 #define TG3_FW_SBSS_ADDR        0x08000a60
4515 #define TG3_FW_SBSS_LEN         0xc
4516 #define TG3_FW_BSS_ADDR         0x08000a70
4517 #define TG3_FW_BSS_LEN          0x10
4518
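/* Text segment of the 5701 A0 bug-fix firmware.  Together with the
 * rodata image below, it is copied into the RX and TX CPU scratch
 * areas by tg3_load_5701_a0_firmware_fix(), which then starts only
 * the RX CPU.
 */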
4519 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4520         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4521         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4522         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4523         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4524         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4525         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4526         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4527         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4528         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4529         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4530         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4531         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4532         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4533         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4534         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4535         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4536         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4537         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4538         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4539         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4540         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4541         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4542         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4543         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4544         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4545         0, 0, 0, 0, 0, 0,
4546         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4547         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4548         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4549         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4550         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4551         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4552         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4553         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4554         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4555         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4556         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4557         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4558         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4559         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4560         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4561         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4562         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4563         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4564         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4565         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4566         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4567         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4568         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4569         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4570         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4571         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4572         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4573         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4574         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4575         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4576         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4577         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4578         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4579         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4580         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4581         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4582         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4583         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4584         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4585         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4586         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4587         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4588         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4589         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4590         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4591         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4592         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4593         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4594         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4595         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4596         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4597         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4598         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4599         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4600         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4601         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4602         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4603         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4604         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4605         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4606         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4607         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4608         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4609         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4610         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4611 };
4612
4613 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4614         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4615         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4616         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4617         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4618         0x00000000
4619 };
4620
4621 #if 0 /* All zeros, don't eat up space with it. */
4622 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4623         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4624         0x00000000, 0x00000000, 0x00000000, 0x00000000
4625 };
4626 #endif
4627
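/* On-chip scratch memory windows into which the CPU firmware images
 * are downloaded.
 */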
4628 #define RX_CPU_SCRATCH_BASE     0x30000
4629 #define RX_CPU_SCRATCH_SIZE     0x04000
4630 #define TX_CPU_SCRATCH_BASE     0x34000
4631 #define TX_CPU_SCRATCH_SIZE     0x04000
4632
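/* Halt the embedded RX or TX CPU by repeatedly writing CPU_MODE_HALT
 * until the halt bit reads back set.  Requesting a TX CPU halt on
 * 5705 and later chips is a BUG(), as those chips have no separate
 * TX CPU.
 */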
4633 /* tp->lock is held. */
4634 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4635 {
4636         int i;
4637
4638         if (offset == TX_CPU_BASE &&
4639             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4640                 BUG();
4641
4642         if (offset == RX_CPU_BASE) {
4643                 for (i = 0; i < 10000; i++) {
4644                         tw32(offset + CPU_STATE, 0xffffffff);
4645                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4646                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4647                                 break;
4648                 }
4649
4650                 tw32(offset + CPU_STATE, 0xffffffff);
4651                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4652                 udelay(10);
4653         } else {
4654                 for (i = 0; i < 10000; i++) {
4655                         tw32(offset + CPU_STATE, 0xffffffff);
4656                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4657                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4658                                 break;
4659                 }
4660         }
4661
4662         if (i >= 10000) {
4663                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4664                        "%s CPU\n",
4665                        tp->dev->name,
4666                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4667                 return -ENODEV;
4668         }
4669         return 0;
4670 }
4671
4672 struct fw_info {
4673         unsigned int text_base;
4674         unsigned int text_len;
4675         u32 *text_data;
4676         unsigned int rodata_base;
4677         unsigned int rodata_len;
4678         u32 *rodata_data;
4679         unsigned int data_base;
4680         unsigned int data_len;
4681         u32 *data_data;
4682 };
4683
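/* Copy a firmware image (text, rodata and data segments) into a CPU's
 * scratch memory: take the NVRAM lock, halt the target CPU, zero its
 * scratch area, then write each segment at its link-address offset.
 * Segments with no backing data are filled with zeros.
 */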
4684 /* tp->lock is held. */
4685 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4686                                  int cpu_scratch_size, struct fw_info *info)
4687 {
4688         int err, i;
4689         void (*write_op)(struct tg3 *, u32, u32);
4690
4691         if (cpu_base == TX_CPU_BASE &&
4692             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4693                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4694                        "TX cpu firmware on %s which is 5705.\n",
4695                        tp->dev->name);
4696                 return -EINVAL;
4697         }
4698
4699         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4700                 write_op = tg3_write_mem;
4701         else
4702                 write_op = tg3_write_indirect_reg32;
4703
4704         /* It is possible that bootcode is still loading at this point.
4705          * Get the nvram lock before halting the cpu.
4706          */
4707         tg3_nvram_lock(tp);
4708         err = tg3_halt_cpu(tp, cpu_base);
4709         tg3_nvram_unlock(tp);
4710         if (err)
4711                 goto out;
4712
4713         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4714                 write_op(tp, cpu_scratch_base + i, 0);
4715         tw32(cpu_base + CPU_STATE, 0xffffffff);
4716         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4717         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4718                 write_op(tp, (cpu_scratch_base +
4719                               (info->text_base & 0xffff) +
4720                               (i * sizeof(u32))),
4721                          (info->text_data ?
4722                           info->text_data[i] : 0));
4723         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4724                 write_op(tp, (cpu_scratch_base +
4725                               (info->rodata_base & 0xffff) +
4726                               (i * sizeof(u32))),
4727                          (info->rodata_data ?
4728                           info->rodata_data[i] : 0));
4729         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4730                 write_op(tp, (cpu_scratch_base +
4731                               (info->data_base & 0xffff) +
4732                               (i * sizeof(u32))),
4733                          (info->data_data ?
4734                           info->data_data[i] : 0));
4735
4736         err = 0;
4737
4738 out:
4739         return err;
4740 }
4741
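/* Load the 5701 A0 fix-up firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU and verify that its
 * program counter lands on TG3_FW_TEXT_ADDR.
 */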
4742 /* tp->lock is held. */
4743 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4744 {
4745         struct fw_info info;
4746         int err, i;
4747
4748         info.text_base = TG3_FW_TEXT_ADDR;
4749         info.text_len = TG3_FW_TEXT_LEN;
4750         info.text_data = &tg3FwText[0];
4751         info.rodata_base = TG3_FW_RODATA_ADDR;
4752         info.rodata_len = TG3_FW_RODATA_LEN;
4753         info.rodata_data = &tg3FwRodata[0];
4754         info.data_base = TG3_FW_DATA_ADDR;
4755         info.data_len = TG3_FW_DATA_LEN;
4756         info.data_data = NULL;
4757
4758         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4759                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4760                                     &info);
4761         if (err)
4762                 return err;
4763
4764         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4765                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4766                                     &info);
4767         if (err)
4768                 return err;
4769
4770         /* Now startup only the RX cpu. */
4771         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4772         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4773
4774         for (i = 0; i < 5; i++) {
4775                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4776                         break;
4777                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4778                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4779                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4780                 udelay(1000);
4781         }
4782         if (i >= 5) {
4783                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4784                        "to set RX CPU PC, is %08x, should be %08x\n",
4785                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4786                        TG3_FW_TEXT_ADDR);
4787                 return -ENODEV;
4788         }
4789         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4790         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4791
4792         return 0;
4793 }
4794
4795 #if TG3_TSO_SUPPORT != 0
4796
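/* Firmware used when TCP segmentation offload (TSO) is enabled.  The
 * image below is the standard TSO firmware; 5705-class chips need the
 * separate, smaller TG3_TSO5 image defined further down.
 */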
4797 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4798 #define TG3_TSO_FW_RELASE_MINOR         0x6
4799 #define TG3_TSO_FW_RELEASE_FIX          0x0
4800 #define TG3_TSO_FW_START_ADDR           0x08000000
4801 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4802 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4803 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4804 #define TG3_TSO_FW_RODATA_LEN           0x60
4805 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4806 #define TG3_TSO_FW_DATA_LEN             0x30
4807 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4808 #define TG3_TSO_FW_SBSS_LEN             0x2c
4809 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4810 #define TG3_TSO_FW_BSS_LEN              0x894
4811
4812 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4813         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4814         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4815         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4816         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4817         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4818         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4819         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4820         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4821         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4822         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4823         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4824         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4825         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4826         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4827         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4828         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4829         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4830         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4831         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4832         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4833         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4834         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4835         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4836         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4837         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4838         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4839         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4840         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4841         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4842         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4843         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4844         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4845         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4846         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4847         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4848         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4849         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4850         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4851         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4852         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4853         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4854         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4855         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4856         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4857         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4858         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4859         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4860         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4861         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4862         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4863         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4864         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4865         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4866         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4867         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4868         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4869         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4870         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4871         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4872         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4873         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4874         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4875         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4876         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4877         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4878         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4879         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4880         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4881         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4882         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4883         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4884         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4885         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4886         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4887         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4888         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4889         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4890         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4891         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4892         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4893         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4894         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4895         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4896         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4897         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4898         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4899         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4900         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4901         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4902         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4903         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4904         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4905         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4906         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4907         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4908         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4909         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4910         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4911         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4912         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4913         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4914         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4915         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4916         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4917         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4918         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4919         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4920         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4921         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4922         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4923         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4924         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4925         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4926         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4927         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4928         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4929         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4930         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4931         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4932         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4933         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4934         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4935         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4936         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4937         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4938         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4939         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4940         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4941         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4942         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4943         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4944         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4945         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4946         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4947         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4948         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4949         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4950         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4951         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4952         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4953         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4954         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4955         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4956         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4957         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4958         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4959         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4960         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4961         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4962         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4963         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4964         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4965         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4966         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4967         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4968         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4969         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4970         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4971         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4972         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4973         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4974         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4975         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4976         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4977         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4978         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4979         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4980         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4981         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4982         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4983         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4984         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4985         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4986         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4987         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4988         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4989         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4990         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4991         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4992         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4993         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4994         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4995         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4996         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4997         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4998         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4999         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5000         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5001         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5002         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5003         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5004         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5005         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5006         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5007         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5008         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5009         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5010         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5011         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5012         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5013         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5014         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5015         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5016         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5017         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5018         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5019         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5020         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5021         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5022         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5023         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5024         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5025         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5026         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5027         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5028         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5029         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5030         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5031         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5032         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5033         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5034         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5035         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5036         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5037         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5038         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5039         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5040         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5041         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5042         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5043         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5044         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5045         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5046         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5047         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5048         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5049         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5050         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5051         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5052         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5053         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5054         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5055         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5056         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5057         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5058         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5059         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5060         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5061         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5062         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5063         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5064         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5065         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5066         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5067         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5068         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5069         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5070         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5071         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5072         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5073         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5074         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5075         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5076         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5077         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5078         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5079         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5080         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5081         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5082         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5083         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5084         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5085         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5086         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5087         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5088         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5089         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5090         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5091         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5092         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5093         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5094         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5095         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5096         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5097 };
5098
5099 static u32 tg3TsoFwRodata[] = {
5100         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5101         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5102         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5103         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5104         0x00000000,
5105 };
5106
5107 static u32 tg3TsoFwData[] = {
5108         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5109         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5110         0x00000000,
5111 };
5112
5113 /* 5705 needs a special version of the TSO firmware.  */
5114 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5115 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5116 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5117 #define TG3_TSO5_FW_START_ADDR          0x00010000
5118 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5119 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5120 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5121 #define TG3_TSO5_FW_RODATA_LEN          0x50
5122 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5123 #define TG3_TSO5_FW_DATA_LEN            0x20
5124 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5125 #define TG3_TSO5_FW_SBSS_LEN            0x28
5126 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5127 #define TG3_TSO5_FW_BSS_LEN             0x88
5128
5129 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5130         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5131         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5132         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5133         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5134         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5135         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5136         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5137         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5138         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5139         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5140         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5141         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5142         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5143         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5144         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5145         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5146         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5147         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5148         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5149         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5150         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5151         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5152         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5153         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5154         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5155         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5156         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5157         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5158         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5159         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5160         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5161         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5162         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5163         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5164         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5165         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5166         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5167         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5168         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5169         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5170         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5171         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5172         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5173         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5174         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5175         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5176         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5177         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5178         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5179         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5180         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5181         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5182         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5183         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5184         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5185         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5186         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5187         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5188         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5189         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5190         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5191         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5192         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5193         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5194         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5195         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5196         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5197         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5198         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5199         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5200         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5201         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5202         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5203         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5204         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5205         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5206         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5207         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5208         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5209         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5210         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5211         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5212         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5213         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5214         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5215         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5216         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5217         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5218         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5219         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5220         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5221         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5222         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5223         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5224         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5225         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5226         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5227         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5228         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5229         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5230         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5231         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5232         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5233         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5234         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5235         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5236         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5237         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5238         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5239         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5240         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5241         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5242         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5243         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5244         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5245         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5246         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5247         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5248         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5249         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5250         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5251         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5252         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5253         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5254         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5255         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5256         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5257         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5258         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5259         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5260         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5261         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5262         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5263         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5264         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5265         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5266         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5267         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5268         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5269         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5270         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5271         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5272         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5273         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5274         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5275         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5276         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5277         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5278         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5279         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5280         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5281         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5282         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5283         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5284         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5285         0x00000000, 0x00000000, 0x00000000,
5286 };
5287
5288 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5289         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5290         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5291         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5292         0x00000000, 0x00000000, 0x00000000,
5293 };
5294
5295 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5296         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5297         0x00000000, 0x00000000, 0x00000000,
5298 };
5299
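     /* Load the software TSO firmware into the NIC.  Chips with hardware
      * TSO need no firmware at all; the 5705 gets the smaller TSO5 image
      * loaded into the RX CPU (its scratch space is carved out of the
      * mbuf pool), while everything else gets the full image on the TX
      * CPU.  After the download the CPU is pointed at the firmware entry
      * point and released from halt.
      */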
5300 /* tp->lock is held. */
5301 static int tg3_load_tso_firmware(struct tg3 *tp)
5302 {
5303         struct fw_info info;
5304         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5305         int err, i;
5306
5307         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5308                 return 0;
5309
5310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5311                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5312                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5313                 info.text_data = &tg3Tso5FwText[0];
5314                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5315                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5316                 info.rodata_data = &tg3Tso5FwRodata[0];
5317                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5318                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5319                 info.data_data = &tg3Tso5FwData[0];
5320                 cpu_base = RX_CPU_BASE;
5321                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5322                 cpu_scratch_size = (info.text_len +
5323                                     info.rodata_len +
5324                                     info.data_len +
5325                                     TG3_TSO5_FW_SBSS_LEN +
5326                                     TG3_TSO5_FW_BSS_LEN);
5327         } else {
5328                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5329                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5330                 info.text_data = &tg3TsoFwText[0];
5331                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5332                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5333                 info.rodata_data = &tg3TsoFwRodata[0];
5334                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5335                 info.data_len = TG3_TSO_FW_DATA_LEN;
5336                 info.data_data = &tg3TsoFwData[0];
5337                 cpu_base = TX_CPU_BASE;
5338                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5339                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5340         }
5341
5342         err = tg3_load_firmware_cpu(tp, cpu_base,
5343                                     cpu_scratch_base, cpu_scratch_size,
5344                                     &info);
5345         if (err)
5346                 return err;
5347
5348         /* Now start up the CPU. */
5349         tw32(cpu_base + CPU_STATE, 0xffffffff);
5350         tw32_f(cpu_base + CPU_PC,    info.text_base);
5351
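             /* The write to CPU_PC does not always take on the first
              * attempt; re-halt the CPU and rewrite the PC a few times
              * before giving up.
              */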
5352         for (i = 0; i < 5; i++) {
5353                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5354                         break;
5355                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5356                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5357                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5358                 udelay(1000);
5359         }
5360         if (i >= 5) {
5361                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
5362                        "to set CPU PC: is %08x, should be %08x\n",
5363                        tp->dev->name, tr32(cpu_base + CPU_PC),
5364                        info.text_base);
5365                 return -ENODEV;
5366         }
5367         tw32(cpu_base + CPU_STATE, 0xffffffff);
5368         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5369         return 0;
5370 }
5371
5372 #endif /* TG3_TSO_SUPPORT != 0 */
5373
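     /* Program the station address into the MAC.  The same address is
      * written to all four MAC_ADDR slots (and to the twelve extended
      * slots on 5703/5704), and the byte sum of the address is used to
      * seed the transmit backoff algorithm.
      */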
5374 /* tp->lock is held. */
5375 static void __tg3_set_mac_addr(struct tg3 *tp)
5376 {
5377         u32 addr_high, addr_low;
5378         int i;
5379
5380         addr_high = ((tp->dev->dev_addr[0] << 8) |
5381                      tp->dev->dev_addr[1]);
5382         addr_low = ((tp->dev->dev_addr[2] << 24) |
5383                     (tp->dev->dev_addr[3] << 16) |
5384                     (tp->dev->dev_addr[4] <<  8) |
5385                     (tp->dev->dev_addr[5] <<  0));
5386         for (i = 0; i < 4; i++) {
5387                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5388                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5389         }
5390
5391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5392             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5393                 for (i = 0; i < 12; i++) {
5394                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5395                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5396                 }
5397         }
5398
5399         addr_high = (tp->dev->dev_addr[0] +
5400                      tp->dev->dev_addr[1] +
5401                      tp->dev->dev_addr[2] +
5402                      tp->dev->dev_addr[3] +
5403                      tp->dev->dev_addr[4] +
5404                      tp->dev->dev_addr[5]) &
5405                 TX_BACKOFF_SEED_MASK;
5406         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5407 }
5408
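     /* dev->set_mac_address handler: validate the new address, copy it
      * into the net_device and reprogram the MAC under tp->lock.
      */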
5409 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5410 {
5411         struct tg3 *tp = netdev_priv(dev);
5412         struct sockaddr *addr = p;
5413
5414         if (!is_valid_ether_addr(addr->sa_data))
5415                 return -EINVAL;
5416
5417         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5418
5419         spin_lock_bh(&tp->lock);
5420         __tg3_set_mac_addr(tp);
5421         spin_unlock_bh(&tp->lock);
5422
5423         return 0;
5424 }
5425
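     /* Write one TG3_BDINFO block into NIC SRAM at bdinfo_addr: the
      * high/low halves of the ring's host DMA address, the maxlen/flags
      * word, and (on pre-5705 chips) the ring's location in NIC memory.
      */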
5426 /* tp->lock is held. */
5427 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5428                            dma_addr_t mapping, u32 maxlen_flags,
5429                            u32 nic_addr)
5430 {
5431         tg3_write_mem(tp,
5432                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5433                       ((u64) mapping >> 32));
5434         tg3_write_mem(tp,
5435                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5436                       ((u64) mapping & 0xffffffff));
5437         tg3_write_mem(tp,
5438                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5439                        maxlen_flags);
5440
5441         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5442                 tg3_write_mem(tp,
5443                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5444                               nic_addr);
5445 }
5446
5447 static void __tg3_set_rx_mode(struct net_device *);
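     /* Push an ethtool_coalesce configuration into the host coalescing
      * engine.  The IRQ-time tick registers and the statistics block
      * interval are only programmed on pre-5705 chips, and the latter is
      * forced to zero while the link is down.
      */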
5448 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5449 {
5450         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5451         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5452         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5453         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5454         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5455                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5456                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5457         }
5458         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5459         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5460         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5461                 u32 val = ec->stats_block_coalesce_usecs;
5462
5463                 if (!netif_carrier_ok(tp->dev))
5464                         val = 0;
5465
5466                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5467         }
5468 }
5469
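     /* Bring the chip from reset to a fully operational state: stop any
      * running firmware and MAC activity, reset the core, then rebuild
      * the rings, buffer manager, DMA engines, coalescing setup, MAC
      * address and receive rules, reload any required firmware, and
      * finally re-run PHY setup.
      */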
5470 /* tp->lock is held. */
5471 static int tg3_reset_hw(struct tg3 *tp)
5472 {
5473         u32 val, rdmac_mode;
5474         int i, err, limit;
5475
5476         tg3_disable_ints(tp);
5477
5478         tg3_stop_fw(tp);
5479
5480         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5481
5482         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5483                 tg3_abort_hw(tp, 1);
5484         }
5485
5486         err = tg3_chip_reset(tp);
5487         if (err)
5488                 return err;
5489
5490         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5491
5492         /* This works around an issue with Athlon chipsets on
5493          * B3 tigon3 silicon.  This bit has no effect on any
5494          * other revision.  But do not set this on PCI Express
5495          * chips.
5496          */
5497         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5498                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5499         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5500
5501         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5502             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5503                 val = tr32(TG3PCI_PCISTATE);
5504                 val |= PCISTATE_RETRY_SAME_DMA;
5505                 tw32(TG3PCI_PCISTATE, val);
5506         }
5507
5508         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5509                 /* Enable some hw fixes.  */
5510                 val = tr32(TG3PCI_MSI_DATA);
5511                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5512                 tw32(TG3PCI_MSI_DATA, val);
5513         }
5514
5515         /* Descriptor ring init may make accesses to the
5516          * NIC SRAM area to setup the TX descriptors, so we
5517          * can only do this after the hardware has been
5518          * successfully reset.
5519          */
5520         tg3_init_rings(tp);
5521
5522         /* This value is determined during the probe time DMA
5523          * engine test, tg3_test_dma.
5524          */
5525         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5526
5527         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5528                           GRC_MODE_4X_NIC_SEND_RINGS |
5529                           GRC_MODE_NO_TX_PHDR_CSUM |
5530                           GRC_MODE_NO_RX_PHDR_CSUM);
5531         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5532         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5533                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5534         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5535                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5536
5537         tw32(GRC_MODE,
5538              tp->grc_mode |
5539              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5540
5541         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
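             /* A value of 65 presumably means divide-by-66, turning the
              * 66 MHz core clock into 1 MHz (1 usec) timer ticks.
              */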
5542         val = tr32(GRC_MISC_CFG);
5543         val &= ~0xff;
5544         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5545         tw32(GRC_MISC_CFG, val);
5546
5547         /* Initialize MBUF/DESC pool. */
5548         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5549                 /* Do nothing.  */
5550         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5551                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5552                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5553                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5554                 else
5555                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5556                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5557                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5558         }
5559 #if TG3_TSO_SUPPORT != 0
5560         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5561                 int fw_len;
5562
5563                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5564                           TG3_TSO5_FW_RODATA_LEN +
5565                           TG3_TSO5_FW_DATA_LEN +
5566                           TG3_TSO5_FW_SBSS_LEN +
5567                           TG3_TSO5_FW_BSS_LEN);
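                     /* Round the firmware footprint up to a 128-byte
                      * boundary; the 5705 mbuf pool is placed directly
                      * after the firmware in NIC SRAM.
                      */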
5568                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5569                 tw32(BUFMGR_MB_POOL_ADDR,
5570                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5571                 tw32(BUFMGR_MB_POOL_SIZE,
5572                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5573         }
5574 #endif
5575
5576         if (tp->dev->mtu <= ETH_DATA_LEN) {
5577                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5578                      tp->bufmgr_config.mbuf_read_dma_low_water);
5579                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5580                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5581                 tw32(BUFMGR_MB_HIGH_WATER,
5582                      tp->bufmgr_config.mbuf_high_water);
5583         } else {
5584                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5585                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5586                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5587                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5588                 tw32(BUFMGR_MB_HIGH_WATER,
5589                      tp->bufmgr_config.mbuf_high_water_jumbo);
5590         }
5591         tw32(BUFMGR_DMA_LOW_WATER,
5592              tp->bufmgr_config.dma_low_water);
5593         tw32(BUFMGR_DMA_HIGH_WATER,
5594              tp->bufmgr_config.dma_high_water);
5595
5596         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5597         for (i = 0; i < 2000; i++) {
5598                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5599                         break;
5600                 udelay(10);
5601         }
5602         if (i >= 2000) {
5603                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5604                        tp->dev->name);
5605                 return -ENODEV;
5606         }
5607
5608         /* Setup replenish threshold. */
5609         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5610
5611         /* Initialize TG3_BDINFO's at:
5612          *  RCVDBDI_STD_BD:     standard eth size rx ring
5613          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5614          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5615          *
5616          * like so:
5617          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5618          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5619          *                              ring attribute flags
5620          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5621          *
5622          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5623          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5624          *
5625          * The size of each ring is fixed in the firmware, but the location is
5626          * configurable.
5627          */
5628         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5629              ((u64) tp->rx_std_mapping >> 32));
5630         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5631              ((u64) tp->rx_std_mapping & 0xffffffff));
5632         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5633              NIC_SRAM_RX_BUFFER_DESC);
5634
5635         /* Don't even try to program the JUMBO/MINI buffer descriptor
5636          * configs on 5705.
5637          */
5638         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5639                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5640                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5641         } else {
5642                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5643                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5644
5645                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5646                      BDINFO_FLAGS_DISABLED);
5647
5648                 /* Setup replenish threshold. */
5649                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5650
5651                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5652                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5653                              ((u64) tp->rx_jumbo_mapping >> 32));
5654                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5655                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5656                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5657                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5658                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5659                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5660                 } else {
5661                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5662                              BDINFO_FLAGS_DISABLED);
5663                 }
5664
5665         }
5666
5667         /* There is only one send ring on 5705/5750, no need to explicitly
5668          * disable the others.
5669          */
5670         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5671                 /* Clear out send RCB ring in SRAM. */
5672                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5673                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5674                                       BDINFO_FLAGS_DISABLED);
5675         }
5676
5677         tp->tx_prod = 0;
5678         tp->tx_cons = 0;
5679         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5680         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5681
5682         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5683                        tp->tx_desc_mapping,
5684                        (TG3_TX_RING_SIZE <<
5685                         BDINFO_FLAGS_MAXLEN_SHIFT),
5686                        NIC_SRAM_TX_BUFFER_DESC);
5687
5688         /* There is only one receive return ring on 5705/5750, no need
5689          * to explicitly disable the others.
5690          */
5691         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5692                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5693                      i += TG3_BDINFO_SIZE) {
5694                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5695                                       BDINFO_FLAGS_DISABLED);
5696                 }
5697         }
5698
5699         tp->rx_rcb_ptr = 0;
5700         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5701
5702         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5703                        tp->rx_rcb_mapping,
5704                        (TG3_RX_RCB_RING_SIZE(tp) <<
5705                         BDINFO_FLAGS_MAXLEN_SHIFT),
5706                        0);
5707
5708         tp->rx_std_ptr = tp->rx_pending;
5709         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5710                      tp->rx_std_ptr);
5711
5712         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5713                                                 tp->rx_jumbo_pending : 0;
5714         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5715                      tp->rx_jumbo_ptr);
5716
5717         /* Initialize MAC address and backoff seed. */
5718         __tg3_set_mac_addr(tp);
5719
5720         /* MTU + ethernet header + FCS + optional VLAN tag */
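             /* ETH_HLEN covers the 14-byte Ethernet header; the extra 8
              * bytes are 4 for the FCS plus 4 for an optional 802.1Q tag.
              */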
5721         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5722
5723         /* The slot time is changed by tg3_setup_phy if we
5724          * run at gigabit with half duplex.
5725          */
5726         tw32(MAC_TX_LENGTHS,
5727              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5728              (6 << TX_LENGTHS_IPG_SHIFT) |
5729              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5730
5731         /* Receive rules. */
5732         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5733         tw32(RCVLPC_CONFIG, 0x0181);
5734
5735         /* Calculate RDMAC_MODE setting early, we need it to determine
5736          * the RCVLPC_STATE_ENABLE mask.
5737          */
5738         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5739                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5740                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5741                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5742                       RDMAC_MODE_LNGREAD_ENAB);
5743         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5744                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5745
5746         /* If statement applies to 5705 and 5750 PCI devices only */
5747         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5748              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5749             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5750                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5751                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5752                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5753                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5754                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5755                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5756                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5757                 }
5758         }
5759
5760         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5761                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5762
5763 #if TG3_TSO_SUPPORT != 0
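             /* Bit 27 enables the hardware TSO path in the read DMA
              * engine; later versions of this driver appear to name it
              * RDMAC_MODE_IPV4_LSO_EN.
              */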
5764         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5765                 rdmac_mode |= (1 << 27);
5766 #endif
5767
5768         /* Receive/send statistics. */
5769         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5770             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5771                 val = tr32(RCVLPC_STATS_ENABLE);
5772                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5773                 tw32(RCVLPC_STATS_ENABLE, val);
5774         } else {
5775                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5776         }
5777         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5778         tw32(SNDDATAI_STATSENAB, 0xffffff);
5779         tw32(SNDDATAI_STATSCTRL,
5780              (SNDDATAI_SCTRL_ENABLE |
5781               SNDDATAI_SCTRL_FASTUPD));
5782
5783         /* Setup host coalescing engine. */
5784         tw32(HOSTCC_MODE, 0);
5785         for (i = 0; i < 2000; i++) {
5786                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5787                         break;
5788                 udelay(10);
5789         }
5790
5791         __tg3_set_coalesce(tp, &tp->coal);
5792
5793         /* set status block DMA address */
5794         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5795              ((u64) tp->status_mapping >> 32));
5796         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5797              ((u64) tp->status_mapping & 0xffffffff));
5798
5799         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5800                 /* Status/statistics block address.  See tg3_timer,
5801                  * the tg3_periodic_fetch_stats call there, and
5802                  * tg3_get_stats to see how this works for 5705/5750 chips.
5803                  */
5804                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5805                      ((u64) tp->stats_mapping >> 32));
5806                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5807                      ((u64) tp->stats_mapping & 0xffffffff));
5808                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5809                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5810         }
5811
5812         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5813
5814         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5815         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5816         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5817                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5818
5819         /* Clear statistics/status block in chip, and status block in ram. */
5820         for (i = NIC_SRAM_STATS_BLK;
5821              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5822              i += sizeof(u32)) {
5823                 tg3_write_mem(tp, i, 0);
5824                 udelay(40);
5825         }
5826         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5827
5828         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5829                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5830                 /* reset to prevent losing 1st rx packet intermittently */
5831                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5832                 udelay(10);
5833         }
5834
5835         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5836                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5837         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5838         udelay(40);
5839
5840         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5841          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5842          * register to preserve the GPIO settings for LOMs. The GPIOs,
5843          * whether used as inputs or outputs, are set by boot code after
5844          * reset.
5845          */
5846         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5847                 u32 gpio_mask;
5848
5849                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5850                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5851
5852                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5853                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5854                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5855
5856                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5857
5858                 /* GPIO1 must be driven high for eeprom write protect */
5859                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5860                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5861         }
5862         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5863         udelay(100);
5864
5865         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5866         tp->last_tag = 0;
5867
5868         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5869                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5870                 udelay(40);
5871         }
5872
5873         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5874                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5875                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5876                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5877                WDMAC_MODE_LNGREAD_ENAB);
5878
5879         /* If statement applies to 5705 and 5750 PCI devices only */
5880         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5881              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5882             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5883                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5884                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5885                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5886                         /* nothing */
5887                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5888                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5889                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5890                         val |= WDMAC_MODE_RX_ACCEL;
5891                 }
5892         }
5893
5894         tw32_f(WDMAC_MODE, val);
5895         udelay(40);
5896
5897         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5898                 val = tr32(TG3PCI_X_CAPS);
5899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5900                         val &= ~PCIX_CAPS_BURST_MASK;
5901                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5902                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5903                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5904                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5905                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5906                                 val |= (tp->split_mode_max_reqs <<
5907                                         PCIX_CAPS_SPLIT_SHIFT);
5908                 }
5909                 tw32(TG3PCI_X_CAPS, val);
5910         }
5911
5912         tw32_f(RDMAC_MODE, rdmac_mode);
5913         udelay(40);
5914
5915         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5916         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5917                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5918         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5919         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5920         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5921         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5922         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5923 #if TG3_TSO_SUPPORT != 0
5924         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5925                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5926 #endif
5927         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5928         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5929
5930         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5931                 err = tg3_load_5701_a0_firmware_fix(tp);
5932                 if (err)
5933                         return err;
5934         }
5935
5936 #if TG3_TSO_SUPPORT != 0
5937         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5938                 err = tg3_load_tso_firmware(tp);
5939                 if (err)
5940                         return err;
5941         }
5942 #endif
5943
5944         tp->tx_mode = TX_MODE_ENABLE;
5945         tw32_f(MAC_TX_MODE, tp->tx_mode);
5946         udelay(100);
5947
5948         tp->rx_mode = RX_MODE_ENABLE;
5949         tw32_f(MAC_RX_MODE, tp->rx_mode);
5950         udelay(10);
5951
5952         if (tp->link_config.phy_is_low_power) {
5953                 tp->link_config.phy_is_low_power = 0;
5954                 tp->link_config.speed = tp->link_config.orig_speed;
5955                 tp->link_config.duplex = tp->link_config.orig_duplex;
5956                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5957         }
5958
5959         tp->mi_mode = MAC_MI_MODE_BASE;
5960         tw32_f(MAC_MI_MODE, tp->mi_mode);
5961         udelay(80);
5962
5963         tw32(MAC_LED_CTRL, tp->led_ctrl);
5964
5965         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5966         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5967                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5968                 udelay(10);
5969         }
5970         tw32_f(MAC_RX_MODE, tp->rx_mode);
5971         udelay(10);
5972
5973         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5974                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5975                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5976                         /* Set drive transmission level to 1.2V, but only
5977                          * if the signal pre-emphasis bit is not set.  */
5978                         val = tr32(MAC_SERDES_CFG);
5979                         val &= 0xfffff000;
5980                         val |= 0x880;
5981                         tw32(MAC_SERDES_CFG, val);
5982                 }
5983                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5984                         tw32(MAC_SERDES_CFG, 0x616000);
5985         }
5986
5987         /* Prevent chip from dropping frames when flow control
5988          * is enabled.
5989          */
5990         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5991
5992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5993             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5994                 /* Use hardware link auto-negotiation */
5995                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5996         }
5997
5998         err = tg3_setup_phy(tp, 1);
5999         if (err)
6000                 return err;
6001
6002         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6003                 u32 tmp;
6004
6005                 /* Clear CRC stats. */
6006                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6007                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6008                         tg3_readphy(tp, 0x14, &tmp);
6009                 }
6010         }
6011
6012         __tg3_set_rx_mode(tp->dev);
6013
6014         /* Initialize receive rules. */
6015         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6016         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6017         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6018         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6019
6020         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6021             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6022                 limit = 8;
6023         else
6024                 limit = 16;
6025         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6026                 limit -= 4;
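             /* limit is 16 or 8 rules depending on the chip, less four when
              * ASF is enabled.  The fall-throughs below are intentional:
              * every rule from limit - 1 down to 4 is cleared, rules 3 and 2
              * are deliberately left alone, and rules 1 and 0 were
              * programmed above.
              */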
6027         switch (limit) {
6028         case 16:
6029                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6030         case 15:
6031                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6032         case 14:
6033                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6034         case 13:
6035                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6036         case 12:
6037                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6038         case 11:
6039                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6040         case 10:
6041                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6042         case 9:
6043                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6044         case 8:
6045                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6046         case 7:
6047                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6048         case 6:
6049                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6050         case 5:
6051                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6052         case 4:
6053                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6054         case 3:
6055                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6056         case 2:
6057         case 1:
6058
6059         default:
6060                 break;
6061         }
6062
6063         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6064
6065         return 0;
6066 }
6067
6068 /* Called at device open time to get the chip ready for
6069  * packet processing.  Invoked with tp->lock held.
6070  */
6071 static int tg3_init_hw(struct tg3 *tp)
6072 {
6073         int err;
6074
6075         /* Force the chip into D0. */
6076         err = tg3_set_power_state(tp, 0);
6077         if (err)
6078                 goto out;
6079
6080         tg3_switch_clocks(tp);
6081
6082         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6083
6084         err = tg3_reset_hw(tp);
6085
6086 out:
6087         return err;
6088 }
6089
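     /* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
      * software counter.  If the low word ends up smaller than the value
      * just added, the addition wrapped, so carry one into the high word.
      */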
6090 #define TG3_STAT_ADD32(PSTAT, REG) \
6091 do {    u32 __val = tr32(REG); \
6092         (PSTAT)->low += __val; \
6093         if ((PSTAT)->low < __val) \
6094                 (PSTAT)->high += 1; \
6095 } while (0)
6096
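     /* Fold the MAC's 32-bit statistics counters into the 64-bit software
      * copies in tp->hw_stats.  Skipped while the link is down.
      */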
6097 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6098 {
6099         struct tg3_hw_stats *sp = tp->hw_stats;
6100
6101         if (!netif_carrier_ok(tp->dev))
6102                 return;
6103
6104         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6105         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6106         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6107         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6108         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6109         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6110         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6111         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6112         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6113         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6114         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6115         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6116         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6117
6118         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6119         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6120         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6121         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6122         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6123         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6124         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6125         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6126         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6127         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6128         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6129         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6130         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6131         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6132 }
6133
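     /* Driver heartbeat, rearmed every tp->timer_offset jiffies.  It works
      * around the untagged-status interrupt race, schedules a reset if the
      * write DMA engine has stopped, fetches statistics and polls the link
      * once per second, and sends the ASF keepalive on its own interval.
      */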
6134 static void tg3_timer(unsigned long __opaque)
6135 {
6136         struct tg3 *tp = (struct tg3 *) __opaque;
6137
6138         spin_lock(&tp->lock);
6139
6140         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6141                 /* All of this garbage is needed because, when using
6142                  * non-tagged IRQ status, the mailbox/status_block
6143                  * protocol the chip uses with the CPU is race prone.
6144                  */
6145                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6146                         tw32(GRC_LOCAL_CTRL,
6147                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6148                 } else {
6149                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6150                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6151                 }
6152
6153                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6154                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6155                         spin_unlock(&tp->lock);
6156                         schedule_work(&tp->reset_task);
6157                         return;
6158                 }
6159         }
6160
6161         /* This part only runs once per second. */
6162         if (!--tp->timer_counter) {
6163                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6164                         tg3_periodic_fetch_stats(tp);
6165
6166                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6167                         u32 mac_stat;
6168                         int phy_event;
6169
6170                         mac_stat = tr32(MAC_STATUS);
6171
6172                         phy_event = 0;
6173                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6174                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6175                                         phy_event = 1;
6176                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6177                                 phy_event = 1;
6178
6179                         if (phy_event)
6180                                 tg3_setup_phy(tp, 0);
6181                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6182                         u32 mac_stat = tr32(MAC_STATUS);
6183                         int need_setup = 0;
6184
6185                         if (netif_carrier_ok(tp->dev) &&
6186                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6187                                 need_setup = 1;
6188                         }
6189                         if (!netif_carrier_ok(tp->dev) &&
6190                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6191                                          MAC_STATUS_SIGNAL_DET))) {
6192                                 need_setup = 1;
6193                         }
6194                         if (need_setup) {
6195                                 tw32_f(MAC_MODE,
6196                                      (tp->mac_mode &
6197                                       ~MAC_MODE_PORT_MODE_MASK));
6198                                 udelay(40);
6199                                 tw32_f(MAC_MODE, tp->mac_mode);
6200                                 udelay(40);
6201                                 tg3_setup_phy(tp, 0);
6202                         }
6203                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6204                         tg3_serdes_parallel_detect(tp);
6205
6206                 tp->timer_counter = tp->timer_multiplier;
6207         }
6208
6209         /* Heartbeat is only sent once every 2 seconds.  */
6210         if (!--tp->asf_counter) {
6211                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6212                         u32 val;
6213
6214                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6215                                            FWCMD_NICDRV_ALIVE2);
6216                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6217                         /* 5 second timeout */
6218                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6219                         val = tr32(GRC_RX_CPU_EVENT);
6220                         val |= (1 << 14);
6221                         tw32(GRC_RX_CPU_EVENT, val);
6222                 }
6223                 tp->asf_counter = tp->asf_multiplier;
6224         }
6225
6226         spin_unlock(&tp->lock);
6227
6228         tp->timer.expires = jiffies + tp->timer_offset;
6229         add_timer(&tp->timer);
6230 }
6231
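     /* Check that the device can actually raise an interrupt: temporarily
      * install tg3_test_isr, kick the coalescing engine with a "now" event
      * and poll the interrupt mailbox for up to ~50 ms, then restore the
      * normal handler.  Used by the MSI sanity test below.
      */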
6232 static int tg3_test_interrupt(struct tg3 *tp)
6233 {
6234         struct net_device *dev = tp->dev;
6235         int err, i;
6236         u32 int_mbox = 0;
6237
6238         if (!netif_running(dev))
6239                 return -ENODEV;
6240
6241         tg3_disable_ints(tp);
6242
6243         free_irq(tp->pdev->irq, dev);
6244
6245         err = request_irq(tp->pdev->irq, tg3_test_isr,
6246                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6247         if (err)
6248                 return err;
6249
6250         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6251         tg3_enable_ints(tp);
6252
6253         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6254                HOSTCC_MODE_NOW);
6255
6256         for (i = 0; i < 5; i++) {
6257                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6258                                         TG3_64BIT_REG_LOW);
6259                 if (int_mbox != 0)
6260                         break;
6261                 msleep(10);
6262         }
6263
6264         tg3_disable_ints(tp);
6265
6266         free_irq(tp->pdev->irq, dev);
6267
6268         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6269                 err = request_irq(tp->pdev->irq, tg3_msi,
6270                                   SA_SAMPLE_RANDOM, dev->name, dev);
6271         else {
6272                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6273                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6274                         fn = tg3_interrupt_tagged;
6275                 err = request_irq(tp->pdev->irq, fn,
6276                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6277         }
6278
6279         if (err)
6280                 return err;
6281
6282         if (int_mbox != 0)
6283                 return 0;
6284
6285         return -EIO;
6286 }
6287
6288 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6289  * successfully restored.
6290  */
6291 static int tg3_test_msi(struct tg3 *tp)
6292 {
6293         struct net_device *dev = tp->dev;
6294         int err;
6295         u16 pci_cmd;
6296
6297         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6298                 return 0;
6299
6300         /* Turn off SERR reporting in case MSI terminates with Master
6301          * Abort.
6302          */
6303         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6304         pci_write_config_word(tp->pdev, PCI_COMMAND,
6305                               pci_cmd & ~PCI_COMMAND_SERR);
6306
6307         err = tg3_test_interrupt(tp);
6308
6309         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6310
6311         if (!err)
6312                 return 0;
6313
6314         /* other failures */
6315         if (err != -EIO)
6316                 return err;
6317
6318         /* MSI test failed, go back to INTx mode */
6319         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6320                "switching to INTx mode. Please report this failure to "
6321                "the PCI maintainer and include system chipset information.\n",
6322                        tp->dev->name);
6323
6324         free_irq(tp->pdev->irq, dev);
6325         pci_disable_msi(tp->pdev);
6326
6327         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6328
6329         {
6330                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6331                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6332                         fn = tg3_interrupt_tagged;
6333
6334                 err = request_irq(tp->pdev->irq, fn,
6335                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6336         }
6337         if (err)
6338                 return err;
6339
6340         /* Need to reset the chip because the MSI cycle may have terminated
6341          * with Master Abort.
6342          */
6343         tg3_full_lock(tp, 1);
6344
6345         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6346         err = tg3_init_hw(tp);
6347
6348         tg3_full_unlock(tp);
6349
6350         if (err)
6351                 free_irq(tp->pdev->irq, dev);
6352
6353         return err;
6354 }
6355
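/* Bring the interface up (editorial summary): allocate the DMA-consistent
 * rings, optionally enable MSI on 5750+ parts (except the 5750 AX/BX
 * revisions), request the IRQ, initialize the hardware, arm the periodic
 * timer, run the MSI self-test, then enable interrupts and start the TX
 * queue.
 */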
6356 static int tg3_open(struct net_device *dev)
6357 {
6358         struct tg3 *tp = netdev_priv(dev);
6359         int err;
6360
6361         tg3_full_lock(tp, 0);
6362
6363         tg3_disable_ints(tp);
6364         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6365
6366         tg3_full_unlock(tp);
6367
6368         /* The placement of this call is tied
6369          * to the setup and use of Host TX descriptors.
6370          */
6371         err = tg3_alloc_consistent(tp);
6372         if (err)
6373                 return err;
6374
6375         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6376             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6377             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6378                 /* All MSI-supporting chips should support tagged
6379                  * status.  Assert that this is the case.
6380                  */
6381                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6382                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6383                                "Not using MSI.\n", tp->dev->name);
6384                 } else if (pci_enable_msi(tp->pdev) == 0) {
6385                         u32 msi_mode;
6386
6387                         msi_mode = tr32(MSGINT_MODE);
6388                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6389                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6390                 }
6391         }
6392         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6393                 err = request_irq(tp->pdev->irq, tg3_msi,
6394                                   SA_SAMPLE_RANDOM, dev->name, dev);
6395         else {
6396                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6397                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6398                         fn = tg3_interrupt_tagged;
6399
6400                 err = request_irq(tp->pdev->irq, fn,
6401                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6402         }
6403
6404         if (err) {
6405                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6406                         pci_disable_msi(tp->pdev);
6407                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6408                 }
6409                 tg3_free_consistent(tp);
6410                 return err;
6411         }
6412
6413         tg3_full_lock(tp, 0);
6414
6415         err = tg3_init_hw(tp);
6416         if (err) {
6417                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6418                 tg3_free_rings(tp);
6419         } else {
6420                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6421                         tp->timer_offset = HZ;
6422                 else
6423                         tp->timer_offset = HZ / 10;
6424
6425                 BUG_ON(tp->timer_offset > HZ);
6426                 tp->timer_counter = tp->timer_multiplier =
6427                         (HZ / tp->timer_offset);
6428                 tp->asf_counter = tp->asf_multiplier =
6429                         ((HZ / tp->timer_offset) * 2);
6430
6431                 init_timer(&tp->timer);
6432                 tp->timer.expires = jiffies + tp->timer_offset;
6433                 tp->timer.data = (unsigned long) tp;
6434                 tp->timer.function = tg3_timer;
6435         }
6436
6437         tg3_full_unlock(tp);
6438
6439         if (err) {
6440                 free_irq(tp->pdev->irq, dev);
6441                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6442                         pci_disable_msi(tp->pdev);
6443                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6444                 }
6445                 tg3_free_consistent(tp);
6446                 return err;
6447         }
6448
6449         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6450                 err = tg3_test_msi(tp);
6451
6452                 if (err) {
6453                         tg3_full_lock(tp, 0);
6454
6455                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6456                                 pci_disable_msi(tp->pdev);
6457                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6458                         }
6459                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6460                         tg3_free_rings(tp);
6461                         tg3_free_consistent(tp);
6462
6463                         tg3_full_unlock(tp);
6464
6465                         return err;
6466                 }
6467         }
6468
6469         tg3_full_lock(tp, 0);
6470
6471         add_timer(&tp->timer);
6472         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6473         tg3_enable_ints(tp);
6474
6475         tg3_full_unlock(tp);
6476
6477         netif_start_queue(dev);
6478
6479         return 0;
6480 }
6481
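/* Debug-only dump of MAC, DMA, host-coalescing and ring state; compiled out
 * by default via the surrounding #if 0.
 */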
6482 #if 0
6483 /*static*/ void tg3_dump_state(struct tg3 *tp)
6484 {
6485         u32 val32, val32_2, val32_3, val32_4, val32_5;
6486         u16 val16;
6487         int i;
6488
6489         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6490         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6491         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6492                val16, val32);
6493
6494         /* MAC block */
6495         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6496                tr32(MAC_MODE), tr32(MAC_STATUS));
6497         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6498                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6499         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6500                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6501         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6502                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6503
6504         /* Send data initiator control block */
6505         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6506                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6507         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6508                tr32(SNDDATAI_STATSCTRL));
6509
6510         /* Send data completion control block */
6511         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6512
6513         /* Send BD ring selector block */
6514         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6515                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6516
6517         /* Send BD initiator control block */
6518         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6519                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6520
6521         /* Send BD completion control block */
6522         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6523
6524         /* Receive list placement control block */
6525         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6526                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6527         printk("       RCVLPC_STATSCTRL[%08x]\n",
6528                tr32(RCVLPC_STATSCTRL));
6529
6530         /* Receive data and receive BD initiator control block */
6531         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6532                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6533
6534         /* Receive data completion control block */
6535         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6536                tr32(RCVDCC_MODE));
6537
6538         /* Receive BD initiator control block */
6539         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6540                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6541
6542         /* Receive BD completion control block */
6543         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6544                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6545
6546         /* Receive list selector control block */
6547         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6548                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6549
6550         /* Mbuf cluster free block */
6551         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6552                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6553
6554         /* Host coalescing control block */
6555         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6556                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6557         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6558                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6559                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6560         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6561                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6562                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6563         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6564                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6565         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6566                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6567
6568         /* Memory arbiter control block */
6569         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6570                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6571
6572         /* Buffer manager control block */
6573         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6574                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6575         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6576                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6577         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6578                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6579                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6580                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6581
6582         /* Read DMA control block */
6583         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6584                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6585
6586         /* Write DMA control block */
6587         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6588                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6589
6590         /* DMA completion block */
6591         printk("DEBUG: DMAC_MODE[%08x]\n",
6592                tr32(DMAC_MODE));
6593
6594         /* GRC block */
6595         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6596                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6597         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6598                tr32(GRC_LOCAL_CTRL));
6599
6600         /* TG3_BDINFOs */
6601         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6602                tr32(RCVDBDI_JUMBO_BD + 0x0),
6603                tr32(RCVDBDI_JUMBO_BD + 0x4),
6604                tr32(RCVDBDI_JUMBO_BD + 0x8),
6605                tr32(RCVDBDI_JUMBO_BD + 0xc));
6606         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6607                tr32(RCVDBDI_STD_BD + 0x0),
6608                tr32(RCVDBDI_STD_BD + 0x4),
6609                tr32(RCVDBDI_STD_BD + 0x8),
6610                tr32(RCVDBDI_STD_BD + 0xc));
6611         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6612                tr32(RCVDBDI_MINI_BD + 0x0),
6613                tr32(RCVDBDI_MINI_BD + 0x4),
6614                tr32(RCVDBDI_MINI_BD + 0x8),
6615                tr32(RCVDBDI_MINI_BD + 0xc));
6616
6617         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6618         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6619         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6620         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6621         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6622                val32, val32_2, val32_3, val32_4);
6623
6624         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6625         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6626         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6627         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6628         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6629                val32, val32_2, val32_3, val32_4);
6630
6631         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6632         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6633         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6634         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6635         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6636         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6637                val32, val32_2, val32_3, val32_4, val32_5);
6638
6639         /* SW status block */
6640         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6641                tp->hw_status->status,
6642                tp->hw_status->status_tag,
6643                tp->hw_status->rx_jumbo_consumer,
6644                tp->hw_status->rx_consumer,
6645                tp->hw_status->rx_mini_consumer,
6646                tp->hw_status->idx[0].rx_producer,
6647                tp->hw_status->idx[0].tx_consumer);
6648
6649         /* SW statistics block */
6650         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6651                ((u32 *)tp->hw_stats)[0],
6652                ((u32 *)tp->hw_stats)[1],
6653                ((u32 *)tp->hw_stats)[2],
6654                ((u32 *)tp->hw_stats)[3]);
6655
6656         /* Mailboxes */
6657         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6658                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6659                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6660                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6661                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6662
6663         /* NIC side send descriptors. */
6664         for (i = 0; i < 6; i++) {
6665                 unsigned long txd;
6666
6667                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6668                         + (i * sizeof(struct tg3_tx_buffer_desc));
6669                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6670                        i,
6671                        readl(txd + 0x0), readl(txd + 0x4),
6672                        readl(txd + 0x8), readl(txd + 0xc));
6673         }
6674
6675         /* NIC side RX descriptors. */
6676         for (i = 0; i < 6; i++) {
6677                 unsigned long rxd;
6678
6679                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6680                         + (i * sizeof(struct tg3_rx_buffer_desc));
6681                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6682                        i,
6683                        readl(rxd + 0x0), readl(rxd + 0x4),
6684                        readl(rxd + 0x8), readl(rxd + 0xc));
6685                 rxd += (4 * sizeof(u32));
6686                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6687                        i,
6688                        readl(rxd + 0x0), readl(rxd + 0x4),
6689                        readl(rxd + 0x8), readl(rxd + 0xc));
6690         }
6691
6692         for (i = 0; i < 6; i++) {
6693                 unsigned long rxd;
6694
6695                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6696                         + (i * sizeof(struct tg3_rx_buffer_desc));
6697                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6698                        i,
6699                        readl(rxd + 0x0), readl(rxd + 0x4),
6700                        readl(rxd + 0x8), readl(rxd + 0xc));
6701                 rxd += (4 * sizeof(u32));
6702                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6703                        i,
6704                        readl(rxd + 0x0), readl(rxd + 0x4),
6705                        readl(rxd + 0x8), readl(rxd + 0xc));
6706         }
6707 }
6708 #endif
6709
6710 static struct net_device_stats *tg3_get_stats(struct net_device *);
6711 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6712
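/* Bring the interface down (editorial summary): stop the queue and timer,
 * halt the chip, free the rings and the IRQ (disabling MSI if it was in
 * use), and snapshot the hardware counters into net_stats_prev/estats_prev
 * so statistics survive the close/open cycle before the DMA memory is
 * released.
 */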
6713 static int tg3_close(struct net_device *dev)
6714 {
6715         struct tg3 *tp = netdev_priv(dev);
6716
6717         netif_stop_queue(dev);
6718
6719         del_timer_sync(&tp->timer);
6720
6721         tg3_full_lock(tp, 1);
6722 #if 0
6723         tg3_dump_state(tp);
6724 #endif
6725
6726         tg3_disable_ints(tp);
6727
6728         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6729         tg3_free_rings(tp);
6730         tp->tg3_flags &=
6731                 ~(TG3_FLAG_INIT_COMPLETE |
6732                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6733         netif_carrier_off(tp->dev);
6734
6735         tg3_full_unlock(tp);
6736
6737         free_irq(tp->pdev->irq, dev);
6738         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6739                 pci_disable_msi(tp->pdev);
6740                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6741         }
6742
6743         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6744                sizeof(tp->net_stats_prev));
6745         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6746                sizeof(tp->estats_prev));
6747
6748         tg3_free_consistent(tp);
6749
6750         return 0;
6751 }
6752
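/* Hardware counters are 64-bit high/low pairs.  On 32-bit kernels only the
 * low word is returned; on 64-bit kernels both halves are combined.
 */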
6753 static inline unsigned long get_stat64(tg3_stat64_t *val)
6754 {
6755         unsigned long ret;
6756
6757 #if (BITS_PER_LONG == 32)
6758         ret = val->low;
6759 #else
6760         ret = ((u64)val->high << 32) | ((u64)val->low);
6761 #endif
6762         return ret;
6763 }
6764
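/* On 5700/5701 copper parts the CRC error count is read from PHY register
 * 0x14 after setting bit 15 of register 0x1e, and is accumulated in
 * software.  All other chips use the rx_fcs_errors MIB counter.
 */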
6765 static unsigned long calc_crc_errors(struct tg3 *tp)
6766 {
6767         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6768
6769         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6770             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6771              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6772                 u32 val;
6773
6774                 spin_lock_bh(&tp->lock);
6775                 if (!tg3_readphy(tp, 0x1e, &val)) {
6776                         tg3_writephy(tp, 0x1e, val | 0x8000);
6777                         tg3_readphy(tp, 0x14, &val);
6778                 } else
6779                         val = 0;
6780                 spin_unlock_bh(&tp->lock);
6781
6782                 tp->phy_crc_errors += val;
6783
6784                 return tp->phy_crc_errors;
6785         }
6786
6787         return get_stat64(&hw_stats->rx_fcs_errors);
6788 }
6789
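/* The MIB counters are cleared whenever the chip is reset, so each ethtool
 * statistic is accumulated as the snapshot taken at the last close
 * (estats_prev) plus the live hardware counter.
 */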
6790 #define ESTAT_ADD(member) \
6791         estats->member =        old_estats->member + \
6792                                 get_stat64(&hw_stats->member)
6793
6794 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6795 {
6796         struct tg3_ethtool_stats *estats = &tp->estats;
6797         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6798         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6799
6800         if (!hw_stats)
6801                 return old_estats;
6802
6803         ESTAT_ADD(rx_octets);
6804         ESTAT_ADD(rx_fragments);
6805         ESTAT_ADD(rx_ucast_packets);
6806         ESTAT_ADD(rx_mcast_packets);
6807         ESTAT_ADD(rx_bcast_packets);
6808         ESTAT_ADD(rx_fcs_errors);
6809         ESTAT_ADD(rx_align_errors);
6810         ESTAT_ADD(rx_xon_pause_rcvd);
6811         ESTAT_ADD(rx_xoff_pause_rcvd);
6812         ESTAT_ADD(rx_mac_ctrl_rcvd);
6813         ESTAT_ADD(rx_xoff_entered);
6814         ESTAT_ADD(rx_frame_too_long_errors);
6815         ESTAT_ADD(rx_jabbers);
6816         ESTAT_ADD(rx_undersize_packets);
6817         ESTAT_ADD(rx_in_length_errors);
6818         ESTAT_ADD(rx_out_length_errors);
6819         ESTAT_ADD(rx_64_or_less_octet_packets);
6820         ESTAT_ADD(rx_65_to_127_octet_packets);
6821         ESTAT_ADD(rx_128_to_255_octet_packets);
6822         ESTAT_ADD(rx_256_to_511_octet_packets);
6823         ESTAT_ADD(rx_512_to_1023_octet_packets);
6824         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6825         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6826         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6827         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6828         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6829
6830         ESTAT_ADD(tx_octets);
6831         ESTAT_ADD(tx_collisions);
6832         ESTAT_ADD(tx_xon_sent);
6833         ESTAT_ADD(tx_xoff_sent);
6834         ESTAT_ADD(tx_flow_control);
6835         ESTAT_ADD(tx_mac_errors);
6836         ESTAT_ADD(tx_single_collisions);
6837         ESTAT_ADD(tx_mult_collisions);
6838         ESTAT_ADD(tx_deferred);
6839         ESTAT_ADD(tx_excessive_collisions);
6840         ESTAT_ADD(tx_late_collisions);
6841         ESTAT_ADD(tx_collide_2times);
6842         ESTAT_ADD(tx_collide_3times);
6843         ESTAT_ADD(tx_collide_4times);
6844         ESTAT_ADD(tx_collide_5times);
6845         ESTAT_ADD(tx_collide_6times);
6846         ESTAT_ADD(tx_collide_7times);
6847         ESTAT_ADD(tx_collide_8times);
6848         ESTAT_ADD(tx_collide_9times);
6849         ESTAT_ADD(tx_collide_10times);
6850         ESTAT_ADD(tx_collide_11times);
6851         ESTAT_ADD(tx_collide_12times);
6852         ESTAT_ADD(tx_collide_13times);
6853         ESTAT_ADD(tx_collide_14times);
6854         ESTAT_ADD(tx_collide_15times);
6855         ESTAT_ADD(tx_ucast_packets);
6856         ESTAT_ADD(tx_mcast_packets);
6857         ESTAT_ADD(tx_bcast_packets);
6858         ESTAT_ADD(tx_carrier_sense_errors);
6859         ESTAT_ADD(tx_discards);
6860         ESTAT_ADD(tx_errors);
6861
6862         ESTAT_ADD(dma_writeq_full);
6863         ESTAT_ADD(dma_write_prioq_full);
6864         ESTAT_ADD(rxbds_empty);
6865         ESTAT_ADD(rx_discards);
6866         ESTAT_ADD(rx_errors);
6867         ESTAT_ADD(rx_threshold_hit);
6868
6869         ESTAT_ADD(dma_readq_full);
6870         ESTAT_ADD(dma_read_prioq_full);
6871         ESTAT_ADD(tx_comp_queue_full);
6872
6873         ESTAT_ADD(ring_set_send_prod_index);
6874         ESTAT_ADD(ring_status_update);
6875         ESTAT_ADD(nic_irqs);
6876         ESTAT_ADD(nic_avoided_irqs);
6877         ESTAT_ADD(nic_tx_threshold_hit);
6878
6879         return estats;
6880 }
6881
6882 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6883 {
6884         struct tg3 *tp = netdev_priv(dev);
6885         struct net_device_stats *stats = &tp->net_stats;
6886         struct net_device_stats *old_stats = &tp->net_stats_prev;
6887         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6888
6889         if (!hw_stats)
6890                 return old_stats;
6891
6892         stats->rx_packets = old_stats->rx_packets +
6893                 get_stat64(&hw_stats->rx_ucast_packets) +
6894                 get_stat64(&hw_stats->rx_mcast_packets) +
6895                 get_stat64(&hw_stats->rx_bcast_packets);
6896                 
6897         stats->tx_packets = old_stats->tx_packets +
6898                 get_stat64(&hw_stats->tx_ucast_packets) +
6899                 get_stat64(&hw_stats->tx_mcast_packets) +
6900                 get_stat64(&hw_stats->tx_bcast_packets);
6901
6902         stats->rx_bytes = old_stats->rx_bytes +
6903                 get_stat64(&hw_stats->rx_octets);
6904         stats->tx_bytes = old_stats->tx_bytes +
6905                 get_stat64(&hw_stats->tx_octets);
6906
6907         stats->rx_errors = old_stats->rx_errors +
6908                 get_stat64(&hw_stats->rx_errors);
6909         stats->tx_errors = old_stats->tx_errors +
6910                 get_stat64(&hw_stats->tx_errors) +
6911                 get_stat64(&hw_stats->tx_mac_errors) +
6912                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6913                 get_stat64(&hw_stats->tx_discards);
6914
6915         stats->multicast = old_stats->multicast +
6916                 get_stat64(&hw_stats->rx_mcast_packets);
6917         stats->collisions = old_stats->collisions +
6918                 get_stat64(&hw_stats->tx_collisions);
6919
6920         stats->rx_length_errors = old_stats->rx_length_errors +
6921                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6922                 get_stat64(&hw_stats->rx_undersize_packets);
6923
6924         stats->rx_over_errors = old_stats->rx_over_errors +
6925                 get_stat64(&hw_stats->rxbds_empty);
6926         stats->rx_frame_errors = old_stats->rx_frame_errors +
6927                 get_stat64(&hw_stats->rx_align_errors);
6928         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6929                 get_stat64(&hw_stats->tx_discards);
6930         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6931                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6932
6933         stats->rx_crc_errors = old_stats->rx_crc_errors +
6934                 calc_crc_errors(tp);
6935
6936         stats->rx_missed_errors = old_stats->rx_missed_errors +
6937                 get_stat64(&hw_stats->rx_discards);
6938
6939         return stats;
6940 }
6941
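/* Bit-serial CRC-32 over 'len' bytes using the reflected Ethernet polynomial
 * 0xedb88320, with the usual all-ones preset and final inversion.  Used
 * below to compute the multicast hash filter index.
 */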
6942 static inline u32 calc_crc(unsigned char *buf, int len)
6943 {
6944         u32 reg;
6945         u32 tmp;
6946         int j, k;
6947
6948         reg = 0xffffffff;
6949
6950         for (j = 0; j < len; j++) {
6951                 reg ^= buf[j];
6952
6953                 for (k = 0; k < 8; k++) {
6954                         tmp = reg & 0x01;
6955
6956                         reg >>= 1;
6957
6958                         if (tmp) {
6959                                 reg ^= 0xedb88320;
6960                         }
6961                 }
6962         }
6963
6964         return ~reg;
6965 }
6966
6967 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6968 {
6969         /* accept or reject all multicast frames */
6970         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6971         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6972         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6973         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6974 }
6975
6976 static void __tg3_set_rx_mode(struct net_device *dev)
6977 {
6978         struct tg3 *tp = netdev_priv(dev);
6979         u32 rx_mode;
6980
6981         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6982                                   RX_MODE_KEEP_VLAN_TAG);
6983
6984         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6985          * flag clear.
6986          */
6987 #if TG3_VLAN_TAG_USED
6988         if (!tp->vlgrp &&
6989             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6990                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6991 #else
6992         /* By definition, VLAN is always disabled in this
6993          * case.
6994          */
6995         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6996                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6997 #endif
6998
6999         if (dev->flags & IFF_PROMISC) {
7000                 /* Promiscuous mode. */
7001                 rx_mode |= RX_MODE_PROMISC;
7002         } else if (dev->flags & IFF_ALLMULTI) {
7003                 /* Accept all multicast. */
7004                 tg3_set_multi (tp, 1);
7005         } else if (dev->mc_count < 1) {
7006                 /* Reject all multicast. */
7007                 tg3_set_multi (tp, 0);
7008         } else {
7009                 /* Accept one or more multicast(s). */
7010                 struct dev_mc_list *mclist;
7011                 unsigned int i;
7012                 u32 mc_filter[4] = { 0, };
7013                 u32 regidx;
7014                 u32 bit;
7015                 u32 crc;
7016
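                /* Hash on the low 7 bits of the inverted CRC of each
                 * multicast address: bits 6:5 select one of the four
                 * MAC_HASH registers, bits 4:0 the bit within it.
                 */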
7017                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7018                      i++, mclist = mclist->next) {
7019
7020                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7021                         bit = ~crc & 0x7f;
7022                         regidx = (bit & 0x60) >> 5;
7023                         bit &= 0x1f;
7024                         mc_filter[regidx] |= (1 << bit);
7025                 }
7026
7027                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7028                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7029                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7030                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7031         }
7032
7033         if (rx_mode != tp->rx_mode) {
7034                 tp->rx_mode = rx_mode;
7035                 tw32_f(MAC_RX_MODE, rx_mode);
7036                 udelay(10);
7037         }
7038 }
7039
7040 static void tg3_set_rx_mode(struct net_device *dev)
7041 {
7042         struct tg3 *tp = netdev_priv(dev);
7043
7044         tg3_full_lock(tp, 0);
7045         __tg3_set_rx_mode(dev);
7046         tg3_full_unlock(tp);
7047 }
7048
7049 #define TG3_REGDUMP_LEN         (32 * 1024)
7050
7051 static int tg3_get_regs_len(struct net_device *dev)
7052 {
7053         return TG3_REGDUMP_LEN;
7054 }
7055
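/* ethtool register dump: each register block is copied into the buffer at an
 * offset equal to its register offset, so the 32 kB dump is a sparse image
 * of the device's register space (gaps are left zeroed).
 */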
7056 static void tg3_get_regs(struct net_device *dev,
7057                 struct ethtool_regs *regs, void *_p)
7058 {
7059         u32 *p = _p;
7060         struct tg3 *tp = netdev_priv(dev);
7061         u8 *orig_p = _p;
7062         int i;
7063
7064         regs->version = 0;
7065
7066         memset(p, 0, TG3_REGDUMP_LEN);
7067
7068         tg3_full_lock(tp, 0);
7069
7070 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7071 #define GET_REG32_LOOP(base,len)                \
7072 do {    p = (u32 *)(orig_p + (base));           \
7073         for (i = 0; i < len; i += 4)            \
7074                 __GET_REG32((base) + i);        \
7075 } while (0)
7076 #define GET_REG32_1(reg)                        \
7077 do {    p = (u32 *)(orig_p + (reg));            \
7078         __GET_REG32((reg));                     \
7079 } while (0)
7080
7081         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7082         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7083         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7084         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7085         GET_REG32_1(SNDDATAC_MODE);
7086         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7087         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7088         GET_REG32_1(SNDBDC_MODE);
7089         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7090         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7091         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7092         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7093         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7094         GET_REG32_1(RCVDCC_MODE);
7095         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7096         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7097         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7098         GET_REG32_1(MBFREE_MODE);
7099         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7100         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7101         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7102         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7103         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7104         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7105         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7106         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7107         GET_REG32_LOOP(FTQ_RESET, 0x120);
7108         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7109         GET_REG32_1(DMAC_MODE);
7110         GET_REG32_LOOP(GRC_MODE, 0x4c);
7111         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7112                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7113
7114 #undef __GET_REG32
7115 #undef GET_REG32_LOOP
7116 #undef GET_REG32_1
7117
7118         tg3_full_unlock(tp);
7119 }
7120
7121 static int tg3_get_eeprom_len(struct net_device *dev)
7122 {
7123         struct tg3 *tp = netdev_priv(dev);
7124
7125         return tp->nvram_size;
7126 }
7127
7128 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7129
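/* NVRAM is accessed 32 bits at a time, so an unaligned start or length is
 * handled by reading the containing word and copying only the requested
 * bytes.
 */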
7130 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7131 {
7132         struct tg3 *tp = netdev_priv(dev);
7133         int ret;
7134         u8  *pd;
7135         u32 i, offset, len, val, b_offset, b_count;
7136
7137         offset = eeprom->offset;
7138         len = eeprom->len;
7139         eeprom->len = 0;
7140
7141         eeprom->magic = TG3_EEPROM_MAGIC;
7142
7143         if (offset & 3) {
7144                 /* adjust the start to the required 4-byte boundary */
7145                 b_offset = offset & 3;
7146                 b_count = 4 - b_offset;
7147                 if (b_count > len) {
7148                         /* i.e. offset=1 len=2 */
7149                         b_count = len;
7150                 }
7151                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7152                 if (ret)
7153                         return ret;
7154                 val = cpu_to_le32(val);
7155                 memcpy(data, ((char*)&val) + b_offset, b_count);
7156                 len -= b_count;
7157                 offset += b_count;
7158                 eeprom->len += b_count;
7159         }
7160
7161         /* read bytes up to the last 4-byte boundary */
7162         pd = &data[eeprom->len];
7163         for (i = 0; i < (len - (len & 3)); i += 4) {
7164                 ret = tg3_nvram_read(tp, offset + i, &val);
7165                 if (ret) {
7166                         eeprom->len += i;
7167                         return ret;
7168                 }
7169                 val = cpu_to_le32(val);
7170                 memcpy(pd + i, &val, 4);
7171         }
7172         eeprom->len += i;
7173
7174         if (len & 3) {
7175                 /* read the last bytes, which do not end on a 4-byte boundary */
7176                 pd = &data[eeprom->len];
7177                 b_count = len & 3;
7178                 b_offset = offset + len - b_count;
7179                 ret = tg3_nvram_read(tp, b_offset, &val);
7180                 if (ret)
7181                         return ret;
7182                 val = cpu_to_le32(val);
7183                 memcpy(pd, ((char*)&val), b_count);
7184                 eeprom->len += b_count;
7185         }
7186         return 0;
7187 }
7188
7189 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7190
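/* NVRAM writes must cover whole 4-byte words: when the requested range is
 * unaligned, a temporary buffer is built that merges the existing NVRAM
 * contents at the head and tail with the caller's data before writing.
 */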
7191 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7192 {
7193         struct tg3 *tp = netdev_priv(dev);
7194         int ret;
7195         u32 offset, len, b_offset, odd_len, start, end;
7196         u8 *buf;
7197
7198         if (eeprom->magic != TG3_EEPROM_MAGIC)
7199                 return -EINVAL;
7200
7201         offset = eeprom->offset;
7202         len = eeprom->len;
7203
7204         if ((b_offset = (offset & 3))) {
7205                 /* adjust the start to the required 4-byte boundary */
7206                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7207                 if (ret)
7208                         return ret;
7209                 start = cpu_to_le32(start);
7210                 len += b_offset;
7211                 offset &= ~3;
7212                 if (len < 4)
7213                         len = 4;
7214         }
7215
7216         odd_len = 0;
7217         if (len & 3) {
7218                 /* adjust the end to the required 4-byte boundary */
7219                 odd_len = 1;
7220                 len = (len + 3) & ~3;
7221                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7222                 if (ret)
7223                         return ret;
7224                 end = cpu_to_le32(end);
7225         }
7226
7227         buf = data;
7228         if (b_offset || odd_len) {
7229                 buf = kmalloc(len, GFP_KERNEL);
7230                 if (!buf)
7231                         return -ENOMEM;
7232                 if (b_offset)
7233                         memcpy(buf, &start, 4);
7234                 if (odd_len)
7235                         memcpy(buf+len-4, &end, 4);
7236                 memcpy(buf + b_offset, data, eeprom->len);
7237         }
7238
7239         ret = tg3_nvram_write_block(tp, offset, len, buf);
7240
7241         if (buf != data)
7242                 kfree(buf);
7243
7244         return ret;
7245 }
7246
7247 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7248 {
7249         struct tg3 *tp = netdev_priv(dev);
7250   
7251         cmd->supported = (SUPPORTED_Autoneg);
7252
7253         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7254                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7255                                    SUPPORTED_1000baseT_Full);
7256
7257         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7258                 cmd->supported |= (SUPPORTED_100baseT_Half |
7259                                   SUPPORTED_100baseT_Full |
7260                                   SUPPORTED_10baseT_Half |
7261                                   SUPPORTED_10baseT_Full |
7262                                   SUPPORTED_MII);
7263         else
7264                 cmd->supported |= SUPPORTED_FIBRE;
7265   
7266         cmd->advertising = tp->link_config.advertising;
7267         if (netif_running(dev)) {
7268                 cmd->speed = tp->link_config.active_speed;
7269                 cmd->duplex = tp->link_config.active_duplex;
7270         }
7271         cmd->port = 0;
7272         cmd->phy_address = PHY_ADDR;
7273         cmd->transceiver = 0;
7274         cmd->autoneg = tp->link_config.autoneg;
7275         cmd->maxtxpkt = 0;
7276         cmd->maxrxpkt = 0;
7277         return 0;
7278 }
7279   
7280 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7281 {
7282         struct tg3 *tp = netdev_priv(dev);
7283   
7284         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7285                 /* These are the only advertisement bits allowed.  */
7286                 if (cmd->autoneg == AUTONEG_ENABLE &&
7287                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7288                                           ADVERTISED_1000baseT_Full |
7289                                           ADVERTISED_Autoneg |
7290                                           ADVERTISED_FIBRE)))
7291                         return -EINVAL;
7292                 /* Fiber can only do SPEED_1000.  */
7293                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7294                          (cmd->speed != SPEED_1000))
7295                         return -EINVAL;
7296         /* Copper cannot force SPEED_1000.  */
7297         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7298                    (cmd->speed == SPEED_1000))
7299                 return -EINVAL;
7300         else if ((cmd->speed == SPEED_1000) &&
7301                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7302                 return -EINVAL;
7303
7304         tg3_full_lock(tp, 0);
7305
7306         tp->link_config.autoneg = cmd->autoneg;
7307         if (cmd->autoneg == AUTONEG_ENABLE) {
7308                 tp->link_config.advertising = cmd->advertising;
7309                 tp->link_config.speed = SPEED_INVALID;
7310                 tp->link_config.duplex = DUPLEX_INVALID;
7311         } else {
7312                 tp->link_config.advertising = 0;
7313                 tp->link_config.speed = cmd->speed;
7314                 tp->link_config.duplex = cmd->duplex;
7315         }
7316   
7317         if (netif_running(dev))
7318                 tg3_setup_phy(tp, 1);
7319
7320         tg3_full_unlock(tp);
7321   
7322         return 0;
7323 }
7324   
7325 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7326 {
7327         struct tg3 *tp = netdev_priv(dev);
7328   
7329         strcpy(info->driver, DRV_MODULE_NAME);
7330         strcpy(info->version, DRV_MODULE_VERSION);
7331         strcpy(info->bus_info, pci_name(tp->pdev));
7332 }
7333   
7334 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7335 {
7336         struct tg3 *tp = netdev_priv(dev);
7337   
7338         wol->supported = WAKE_MAGIC;
7339         wol->wolopts = 0;
7340         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7341                 wol->wolopts = WAKE_MAGIC;
7342         memset(&wol->sopass, 0, sizeof(wol->sopass));
7343 }
7344   
7345 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7346 {
7347         struct tg3 *tp = netdev_priv(dev);
7348   
7349         if (wol->wolopts & ~WAKE_MAGIC)
7350                 return -EINVAL;
7351         if ((wol->wolopts & WAKE_MAGIC) &&
7352             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7353             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7354                 return -EINVAL;
7355   
7356         spin_lock_bh(&tp->lock);
7357         if (wol->wolopts & WAKE_MAGIC)
7358                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7359         else
7360                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7361         spin_unlock_bh(&tp->lock);
7362   
7363         return 0;
7364 }
7365   
7366 static u32 tg3_get_msglevel(struct net_device *dev)
7367 {
7368         struct tg3 *tp = netdev_priv(dev);
7369         return tp->msg_enable;
7370 }
7371   
7372 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7373 {
7374         struct tg3 *tp = netdev_priv(dev);
7375         tp->msg_enable = value;
7376 }
7377   
7378 #if TG3_TSO_SUPPORT != 0
7379 static int tg3_set_tso(struct net_device *dev, u32 value)
7380 {
7381         struct tg3 *tp = netdev_priv(dev);
7382
7383         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7384                 if (value)
7385                         return -EINVAL;
7386                 return 0;
7387         }
7388         return ethtool_op_set_tso(dev, value);
7389 }
7390 #endif
7391   
7392 static int tg3_nway_reset(struct net_device *dev)
7393 {
7394         struct tg3 *tp = netdev_priv(dev);
7395         u32 bmcr;
7396         int r;
7397   
7398         if (!netif_running(dev))
7399                 return -EAGAIN;
7400
7401         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7402                 return -EINVAL;
7403
7404         spin_lock_bh(&tp->lock);
7405         r = -EINVAL;
7407         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7408             ((bmcr & BMCR_ANENABLE) ||
7409              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7410                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7411                                            BMCR_ANENABLE);
7412                 r = 0;
7413         }
7414         spin_unlock_bh(&tp->lock);
7415   
7416         return r;
7417 }
7418   
7419 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7420 {
7421         struct tg3 *tp = netdev_priv(dev);
7422   
7423         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7424         ering->rx_mini_max_pending = 0;
7425         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7426
7427         ering->rx_pending = tp->rx_pending;
7428         ering->rx_mini_pending = 0;
7429         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7430         ering->tx_pending = tp->tx_pending;
7431 }
7432   
7433 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7434 {
7435         struct tg3 *tp = netdev_priv(dev);
7436         int irq_sync = 0;
7437   
7438         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7439             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7440             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7441                 return -EINVAL;
7442   
7443         if (netif_running(dev)) {
7444                 tg3_netif_stop(tp);
7445                 irq_sync = 1;
7446         }
7447
7448         tg3_full_lock(tp, irq_sync);
7449   
7450         tp->rx_pending = ering->rx_pending;
7451
7452         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7453             tp->rx_pending > 63)
7454                 tp->rx_pending = 63;
7455         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7456         tp->tx_pending = ering->tx_pending;
7457
7458         if (netif_running(dev)) {
7459                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7460                 tg3_init_hw(tp);
7461                 tg3_netif_start(tp);
7462         }
7463
7464         tg3_full_unlock(tp);
7465   
7466         return 0;
7467 }
7468   
7469 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7470 {
7471         struct tg3 *tp = netdev_priv(dev);
7472   
7473         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7474         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7475         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7476 }
7477   
7478 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7479 {
7480         struct tg3 *tp = netdev_priv(dev);
7481         int irq_sync = 0;
7482   
7483         if (netif_running(dev)) {
7484                 tg3_netif_stop(tp);
7485                 irq_sync = 1;
7486         }
7487
7488         tg3_full_lock(tp, irq_sync);
7489
7490         if (epause->autoneg)
7491                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7492         else
7493                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7494         if (epause->rx_pause)
7495                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7496         else
7497                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7498         if (epause->tx_pause)
7499                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7500         else
7501                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7502
7503         if (netif_running(dev)) {
7504                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7505                 tg3_init_hw(tp);
7506                 tg3_netif_start(tp);
7507         }
7508
7509         tg3_full_unlock(tp);
7510   
7511         return 0;
7512 }
7513   
7514 static u32 tg3_get_rx_csum(struct net_device *dev)
7515 {
7516         struct tg3 *tp = netdev_priv(dev);
7517         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7518 }
7519   
7520 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7521 {
7522         struct tg3 *tp = netdev_priv(dev);
7523   
7524         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7525                 if (data != 0)
7526                         return -EINVAL;
7527                 return 0;
7528         }
7529   
7530         spin_lock_bh(&tp->lock);
7531         if (data)
7532                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7533         else
7534                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7535         spin_unlock_bh(&tp->lock);
7536   
7537         return 0;
7538 }
7539   
7540 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7541 {
7542         struct tg3 *tp = netdev_priv(dev);
7543   
7544         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7545                 if (data != 0)
7546                         return -EINVAL;
7547                 return 0;
7548         }
7549   
7550         if (data)
7551                 dev->features |= NETIF_F_IP_CSUM;
7552         else
7553                 dev->features &= ~NETIF_F_IP_CSUM;
7554
7555         return 0;
7556 }
7557
7558 static int tg3_get_stats_count (struct net_device *dev)
7559 {
7560         return TG3_NUM_STATS;
7561 }
7562
7563 static int tg3_get_test_count (struct net_device *dev)
7564 {
7565         return TG3_NUM_TEST;
7566 }
7567
7568 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7569 {
7570         switch (stringset) {
7571         case ETH_SS_STATS:
7572                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7573                 break;
7574         case ETH_SS_TEST:
7575                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7576                 break;
7577         default:
7578                 WARN_ON(1);     /* we need a WARN() */
7579                 break;
7580         }
7581 }
7582
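/* ethtool port identification: blink the LEDs for 'data' seconds (2 by
 * default) by toggling the MAC_LED_CTRL overrides every 500 ms, then
 * restore the saved LED configuration.
 */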
7583 static int tg3_phys_id(struct net_device *dev, u32 data)
7584 {
7585         struct tg3 *tp = netdev_priv(dev);
7586         int i;
7587
7588         if (!netif_running(tp->dev))
7589                 return -EAGAIN;
7590
7591         if (data == 0)
7592                 data = 2;
7593
7594         for (i = 0; i < (data * 2); i++) {
7595                 if ((i % 2) == 0)
7596                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7597                                            LED_CTRL_1000MBPS_ON |
7598                                            LED_CTRL_100MBPS_ON |
7599                                            LED_CTRL_10MBPS_ON |
7600                                            LED_CTRL_TRAFFIC_OVERRIDE |
7601                                            LED_CTRL_TRAFFIC_BLINK |
7602                                            LED_CTRL_TRAFFIC_LED);
7603         
7604                 else
7605                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7606                                            LED_CTRL_TRAFFIC_OVERRIDE);
7607
7608                 if (msleep_interruptible(500))
7609                         break;
7610         }
7611         tw32(MAC_LED_CTRL, tp->led_ctrl);
7612         return 0;
7613 }
7614
7615 static void tg3_get_ethtool_stats (struct net_device *dev,
7616                                    struct ethtool_stats *estats, u64 *tmp_stats)
7617 {
7618         struct tg3 *tp = netdev_priv(dev);
7619         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7620 }
7621
7622 #define NVRAM_TEST_SIZE 0x100
7623
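/* NVRAM self-test: read the first 256 bytes, verify the EEPROM magic, then
 * check the CRC of the 16-byte bootstrap header against the value stored at
 * offset 0x10 and the CRC of the 0x88-byte manufacturing block at 0x74
 * against the value at 0xfc.
 */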
7624 static int tg3_test_nvram(struct tg3 *tp)
7625 {
7626         u32 *buf, csum;
7627         int i, j, err = 0;
7628
7629         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7630         if (buf == NULL)
7631                 return -ENOMEM;
7632
7633         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7634                 u32 val;
7635
7636                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7637                         break;
7638                 buf[j] = cpu_to_le32(val);
7639         }
7640         if (i < NVRAM_TEST_SIZE)
7641                 goto out;
7642
7643         err = -EIO;
7644         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7645                 goto out;
7646
7647         /* Bootstrap checksum at offset 0x10 */
7648         csum = calc_crc((unsigned char *) buf, 0x10);
7649         if (csum != cpu_to_le32(buf[0x10/4]))
7650                 goto out;
7651
7652         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7653         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7654         if (csum != cpu_to_le32(buf[0xfc/4]))
7655                  goto out;
7656
7657         err = 0;
7658
7659 out:
7660         kfree(buf);
7661         return err;
7662 }
7663
7664 #define TG3_SERDES_TIMEOUT_SEC  2
7665 #define TG3_COPPER_TIMEOUT_SEC  6
7666
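/* Link self-test: poll netif_carrier_ok() once a second, giving SerDes
 * devices up to 2 seconds and copper devices up to 6 seconds to report link
 * before failing with -EIO.
 */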
7667 static int tg3_test_link(struct tg3 *tp)
7668 {
7669         int i, max;
7670
7671         if (!netif_running(tp->dev))
7672                 return -ENODEV;
7673
7674         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7675                 max = TG3_SERDES_TIMEOUT_SEC;
7676         else
7677                 max = TG3_COPPER_TIMEOUT_SEC;
7678
7679         for (i = 0; i < max; i++) {
7680                 if (netif_carrier_ok(tp->dev))
7681                         return 0;
7682
7683                 if (msleep_interruptible(1000))
7684                         break;
7685         }
7686
7687         return -EIO;
7688 }
7689
7690 /* Only test the commonly used registers */
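/* Each table entry below pairs a register offset with chip-applicability
 * flags (TG3_FL_*) and the read/write masks used by the register self-test.
 */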
7691 static int tg3_test_registers(struct tg3 *tp)
7692 {
7693         int i, is_5705;
7694         u32 offset, read_mask, write_mask, val, save_val, read_val;
7695         static struct {
7696                 u16 offset;
7697                 u16 flags;
7698 #define TG3_FL_5705     0x1
7699 #define TG3_FL_NOT_5705 0x2
7700 #define TG3_FL_NOT_5788 0x4
7701                 u32 read_mask;
7702                 u32 write_mask;
7703         } reg_tbl[] = {
7704                 /* MAC Control Registers */
7705                 { MAC_MODE, TG3_FL_NOT_5705,
7706                         0x00000000, 0x00ef6f8c },
7707                 { MAC_MODE, TG3_FL_5705,
7708                         0x00000000, 0x01ef6b8c },
7709                 { MAC_STATUS, TG3_FL_NOT_5705,
7710                         0x03800107, 0x00000000 },
7711                 { MAC_STATUS, TG3_FL_5705,
7712                         0x03800100, 0x00000000 },
7713                 { MAC_ADDR_0_HIGH, 0x0000,
7714                         0x00000000, 0x0000ffff },
7715                 { MAC_ADDR_0_LOW, 0x0000,
7716                         0x00000000, 0xffffffff },
7717                 { MAC_RX_MTU_SIZE, 0x0000,
7718                         0x00000000, 0x0000ffff },
7719                 { MAC_TX_MODE, 0x0000,
7720                         0x00000000, 0x00000070 },
7721                 { MAC_TX_LENGTHS, 0x0000,
7722                         0x00000000, 0x00003fff },
7723                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7724                         0x00000000, 0x000007fc },
7725                 { MAC_RX_MODE, TG3_FL_5705,
7726                         0x00000000, 0x000007dc },
7727                 { MAC_HASH_REG_0, 0x0000,
7728                         0x00000000, 0xffffffff },
7729                 { MAC_HASH_REG_1, 0x0000,
7730                         0x00000000, 0xffffffff },
7731                 { MAC_HASH_REG_2, 0x0000,
7732                         0x00000000, 0xffffffff },
7733                 { MAC_HASH_REG_3, 0x0000,
7734                         0x00000000, 0xffffffff },
7735
7736                 /* Receive Data and Receive BD Initiator Control Registers. */
7737                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7738                         0x00000000, 0xffffffff },
7739                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7740                         0x00000000, 0xffffffff },
7741                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7742                         0x00000000, 0x00000003 },
7743                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7744                         0x00000000, 0xffffffff },
7745                 { RCVDBDI_STD_BD+0, 0x0000,
7746                         0x00000000, 0xffffffff },
7747                 { RCVDBDI_STD_BD+4, 0x0000,
7748                         0x00000000, 0xffffffff },
7749                 { RCVDBDI_STD_BD+8, 0x0000,
7750                         0x00000000, 0xffff0002 },
7751                 { RCVDBDI_STD_BD+0xc, 0x0000,
7752                         0x00000000, 0xffffffff },
7753         
7754                 /* Receive BD Initiator Control Registers. */
7755                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7756                         0x00000000, 0xffffffff },
7757                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7758                         0x00000000, 0x000003ff },
7759                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7760                         0x00000000, 0xffffffff },
7761
7762                 /* Host Coalescing Control Registers. */
7763                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7764                         0x00000000, 0x00000004 },
7765                 { HOSTCC_MODE, TG3_FL_5705,
7766                         0x00000000, 0x000000f6 },
7767                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7768                         0x00000000, 0xffffffff },
7769                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7770                         0x00000000, 0x000003ff },
7771                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7772                         0x00000000, 0xffffffff },
7773                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7774                         0x00000000, 0x000003ff },
7775                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7776                         0x00000000, 0xffffffff },
7777                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7778                         0x00000000, 0x000000ff },
7779                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7780                         0x00000000, 0xffffffff },
7781                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7782                         0x00000000, 0x000000ff },
7783                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7784                         0x00000000, 0xffffffff },
7785                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7786                         0x00000000, 0xffffffff },
7787                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7788                         0x00000000, 0xffffffff },
7789                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7790                         0x00000000, 0x000000ff },
7791                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7792                         0x00000000, 0xffffffff },
7793                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7794                         0x00000000, 0x000000ff },
7795                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7796                         0x00000000, 0xffffffff },
7797                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7798                         0x00000000, 0xffffffff },
7799                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7800                         0x00000000, 0xffffffff },
7801                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7802                         0x00000000, 0xffffffff },
7803                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7804                         0x00000000, 0xffffffff },
7805                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7806                         0xffffffff, 0x00000000 },
7807                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7808                         0xffffffff, 0x00000000 },
7809
7810                 /* Buffer Manager Control Registers. */
7811                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7812                         0x00000000, 0x007fff80 },
7813                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7814                         0x00000000, 0x007fffff },
7815                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7816                         0x00000000, 0x0000003f },
7817                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7818                         0x00000000, 0x000001ff },
7819                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7820                         0x00000000, 0x000001ff },
7821                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7822                         0xffffffff, 0x00000000 },
7823                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7824                         0xffffffff, 0x00000000 },
7825
7826                 /* Mailbox Registers */
7827                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7828                         0x00000000, 0x000001ff },
7829                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7830                         0x00000000, 0x000001ff },
7831                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7832                         0x00000000, 0x000007ff },
7833                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7834                         0x00000000, 0x000001ff },
7835
7836                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7837         };
7838
7839         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7840                 is_5705 = 1;
7841         else
7842                 is_5705 = 0;
7843
7844         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7845                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7846                         continue;
7847
7848                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7849                         continue;
7850
7851                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7852                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7853                         continue;
7854
7855                 offset = (u32) reg_tbl[i].offset;
7856                 read_mask = reg_tbl[i].read_mask;
7857                 write_mask = reg_tbl[i].write_mask;
7858
7859                 /* Save the original register content */
7860                 save_val = tr32(offset);
7861
7862                 /* Determine the read-only value. */
7863                 read_val = save_val & read_mask;
7864
7865                 /* Write zero to the register, then make sure the read-only bits
7866                  * are not changed and the read/write bits are all zeros.
7867                  */
7868                 tw32(offset, 0);
7869
7870                 val = tr32(offset);
7871
7872                 /* Test the read-only and read/write bits. */
7873                 if (((val & read_mask) != read_val) || (val & write_mask))
7874                         goto out;
7875
7876                 /* Write ones to all the bits defined by RdMask and WrMask, then
7877                  * make sure the read-only bits are not changed and the
7878                  * read/write bits are all ones.
7879                  */
7880                 tw32(offset, read_mask | write_mask);
7881
7882                 val = tr32(offset);
7883
7884                 /* Test the read-only bits. */
7885                 if ((val & read_mask) != read_val)
7886                         goto out;
7887
7888                 /* Test the read/write bits. */
7889                 if ((val & write_mask) != write_mask)
7890                         goto out;
7891
7892                 tw32(offset, save_val);
7893         }
7894
7895         return 0;
7896
7897 out:
7898         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7899         tw32(offset, save_val);
7900         return -EIO;
7901 }
7902
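     /* Write each test pattern to every 32-bit word in a block of NIC-local
      * memory and read it back; the first mismatch fails the test with -EIO.
      */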
7903 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7904 {
7905         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7906         int i;
7907         u32 j;
7908
7909         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7910                 for (j = 0; j < len; j += 4) {
7911                         u32 val;
7912
7913                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7914                         tg3_read_mem(tp, offset + j, &val);
7915                         if (val != test_pattern[i])
7916                                 return -EIO;
7917                 }
7918         }
7919         return 0;
7920 }
7921
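     /* Run the pattern test over a set of on-chip SRAM regions.  5705 and
      * newer parts expose a different memory map than the original 570x, so
      * the { offset, length } table is chosen per chip; the list is
      * terminated by an offset of 0xffffffff.
      */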
7922 static int tg3_test_memory(struct tg3 *tp)
7923 {
7924         static struct mem_entry {
7925                 u32 offset;
7926                 u32 len;
7927         } mem_tbl_570x[] = {
7928                 { 0x00000000, 0x01000},
7929                 { 0x00002000, 0x1c000},
7930                 { 0xffffffff, 0x00000}
7931         }, mem_tbl_5705[] = {
7932                 { 0x00000100, 0x0000c},
7933                 { 0x00000200, 0x00008},
7934                 { 0x00000b50, 0x00400},
7935                 { 0x00004000, 0x00800},
7936                 { 0x00006000, 0x01000},
7937                 { 0x00008000, 0x02000},
7938                 { 0x00010000, 0x0e000},
7939                 { 0xffffffff, 0x00000}
7940         };
7941         struct mem_entry *mem_tbl;
7942         int err = 0;
7943         int i;
7944
7945         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7946                 mem_tbl = mem_tbl_5705;
7947         else
7948                 mem_tbl = mem_tbl_570x;
7949
7950         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7951                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7952                     mem_tbl[i].len)) != 0)
7953                         break;
7954         }
7955
7956         return err;
7957 }
7958
7959 #define TG3_MAC_LOOPBACK        0
7960 #define TG3_PHY_LOOPBACK        1
7961
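     /* Send a single self-addressed frame through the selected loopback
      * path (internal MAC loopback or PHY loopback) and verify that it
      * shows up on the standard receive ring with the expected length and
      * payload.  Returns 0 on success and -EIO on any mismatch.
      */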
7962 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7963 {
7964         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
7965         u32 desc_idx;
7966         struct sk_buff *skb, *rx_skb;
7967         u8 *tx_data;
7968         dma_addr_t map;
7969         int num_pkts, tx_len, rx_len, i, err;
7970         struct tg3_rx_buffer_desc *desc;
7971
7972         if (loopback_mode == TG3_MAC_LOOPBACK) {
7973                 /* HW errata - mac loopback fails in some cases on 5780.
7974                  * Normal traffic and PHY loopback are not affected by
7975                  * errata.
7976                  */
7977                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7978                         return 0;
7979
7980                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7981                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7982                            MAC_MODE_PORT_MODE_GMII;
7983                 tw32(MAC_MODE, mac_mode);
7984         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7985                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7986                                            BMCR_SPEED1000);
7987                 udelay(40);
7988                 /* reset to prevent losing 1st rx packet intermittently */
7989                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7990                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7991                         udelay(10);
7992                         tw32_f(MAC_RX_MODE, tp->rx_mode);
7993                 }
7994                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7995                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7996                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7997                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7998                 tw32(MAC_MODE, mac_mode);
7999         }
8000         else
8001                 return -EINVAL;
8002
8003         err = -EIO;
8004
8005         tx_len = 1514;
8006         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8007         tx_data = skb_put(skb, tx_len);
8008         memcpy(tx_data, tp->dev->dev_addr, 6);
8009         memset(tx_data + 6, 0x0, 8);
8010
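             /* Accept the full-sized test frame and fill everything past the
              * 14-byte header with an incrementing byte pattern that the
              * receive path checks below.
              */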
8011         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8012
8013         for (i = 14; i < tx_len; i++)
8014                 tx_data[i] = (u8) (i & 0xff);
8015
8016         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8017
8018         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8019              HOSTCC_MODE_NOW);
8020
8021         udelay(10);
8022
8023         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8024
8025         num_pkts = 0;
8026
8027         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8028
8029         tp->tx_prod++;
8030         num_pkts++;
8031
8032         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8033                      tp->tx_prod);
8034         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8035
8036         udelay(10);
8037
8038         for (i = 0; i < 10; i++) {
8039                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8040                        HOSTCC_MODE_NOW);
8041
8042                 udelay(10);
8043
8044                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8045                 rx_idx = tp->hw_status->idx[0].rx_producer;
8046                 if ((tx_idx == tp->tx_prod) &&
8047                     (rx_idx == (rx_start_idx + num_pkts)))
8048                         break;
8049         }
8050
8051         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8052         dev_kfree_skb(skb);
8053
8054         if (tx_idx != tp->tx_prod)
8055                 goto out;
8056
8057         if (rx_idx != rx_start_idx + num_pkts)
8058                 goto out;
8059
8060         desc = &tp->rx_rcb[rx_start_idx];
8061         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8062         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8063         if (opaque_key != RXD_OPAQUE_RING_STD)
8064                 goto out;
8065
8066         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8067             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8068                 goto out;
8069
8070         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8071         if (rx_len != tx_len)
8072                 goto out;
8073
8074         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8075
8076         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8077         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8078
8079         for (i = 14; i < tx_len; i++) {
8080                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8081                         goto out;
8082         }
8083         err = 0;
8084
8085         /* tg3_free_rings will unmap and free the rx_skb */
8086 out:
8087         return err;
8088 }
8089
8090 #define TG3_MAC_LOOPBACK_FAILED         1
8091 #define TG3_PHY_LOOPBACK_FAILED         2
8092 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8093                                          TG3_PHY_LOOPBACK_FAILED)
8094
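     /* Run both loopback variants after a fresh hardware reset and return a
      * bitmask of TG3_*_LOOPBACK_FAILED values; PHY loopback is skipped on
      * SERDES parts.
      */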
8095 static int tg3_test_loopback(struct tg3 *tp)
8096 {
8097         int err = 0;
8098
8099         if (!netif_running(tp->dev))
8100                 return TG3_LOOPBACK_FAILED;
8101
8102         tg3_reset_hw(tp);
8103
8104         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8105                 err |= TG3_MAC_LOOPBACK_FAILED;
8106         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8107                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8108                         err |= TG3_PHY_LOOPBACK_FAILED;
8109         }
8110
8111         return err;
8112 }
8113
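     /* ethtool self-test entry point.  data[] slots: 0 = NVRAM, 1 = link,
      * 2 = registers, 3 = memory, 4 = loopback bitmask, 5 = interrupt.  The
      * last four run only for an offline test, with the interface halted
      * and reinitialized around them.
      */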
8114 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8115                           u64 *data)
8116 {
8117         struct tg3 *tp = netdev_priv(dev);
8118
8119         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8120
8121         if (tg3_test_nvram(tp) != 0) {
8122                 etest->flags |= ETH_TEST_FL_FAILED;
8123                 data[0] = 1;
8124         }
8125         if (tg3_test_link(tp) != 0) {
8126                 etest->flags |= ETH_TEST_FL_FAILED;
8127                 data[1] = 1;
8128         }
8129         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8130                 int irq_sync = 0;
8131
8132                 if (netif_running(dev)) {
8133                         tg3_netif_stop(tp);
8134                         irq_sync = 1;
8135                 }
8136
8137                 tg3_full_lock(tp, irq_sync);
8138
8139                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8140                 tg3_nvram_lock(tp);
8141                 tg3_halt_cpu(tp, RX_CPU_BASE);
8142                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8143                         tg3_halt_cpu(tp, TX_CPU_BASE);
8144                 tg3_nvram_unlock(tp);
8145
8146                 if (tg3_test_registers(tp) != 0) {
8147                         etest->flags |= ETH_TEST_FL_FAILED;
8148                         data[2] = 1;
8149                 }
8150                 if (tg3_test_memory(tp) != 0) {
8151                         etest->flags |= ETH_TEST_FL_FAILED;
8152                         data[3] = 1;
8153                 }
8154                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8155                         etest->flags |= ETH_TEST_FL_FAILED;
8156
8157                 tg3_full_unlock(tp);
8158
8159                 if (tg3_test_interrupt(tp) != 0) {
8160                         etest->flags |= ETH_TEST_FL_FAILED;
8161                         data[5] = 1;
8162                 }
8163
8164                 tg3_full_lock(tp, 0);
8165
8166                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8167                 if (netif_running(dev)) {
8168                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8169                         tg3_init_hw(tp);
8170                         tg3_netif_start(tp);
8171                 }
8172
8173                 tg3_full_unlock(tp);
8174         }
8175 }
8176
8177 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8178 {
8179         struct mii_ioctl_data *data = if_mii(ifr);
8180         struct tg3 *tp = netdev_priv(dev);
8181         int err;
8182
8183         switch(cmd) {
8184         case SIOCGMIIPHY:
8185                 data->phy_id = PHY_ADDR;
8186
8187                 /* fallthru */
8188         case SIOCGMIIREG: {
8189                 u32 mii_regval;
8190
8191                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8192                         break;                  /* We have no PHY */
8193
8194                 spin_lock_bh(&tp->lock);
8195                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8196                 spin_unlock_bh(&tp->lock);
8197
8198                 data->val_out = mii_regval;
8199
8200                 return err;
8201         }
8202
8203         case SIOCSMIIREG:
8204                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8205                         break;                  /* We have no PHY */
8206
8207                 if (!capable(CAP_NET_ADMIN))
8208                         return -EPERM;
8209
8210                 spin_lock_bh(&tp->lock);
8211                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8212                 spin_unlock_bh(&tp->lock);
8213
8214                 return err;
8215
8216         default:
8217                 /* do nothing */
8218                 break;
8219         }
8220         return -EOPNOTSUPP;
8221 }
8222
8223 #if TG3_VLAN_TAG_USED
8224 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8225 {
8226         struct tg3 *tp = netdev_priv(dev);
8227
8228         tg3_full_lock(tp, 0);
8229
8230         tp->vlgrp = grp;
8231
8232         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8233         __tg3_set_rx_mode(dev);
8234
8235         tg3_full_unlock(tp);
8236 }
8237
8238 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8239 {
8240         struct tg3 *tp = netdev_priv(dev);
8241
8242         tg3_full_lock(tp, 0);
8243         if (tp->vlgrp)
8244                 tp->vlgrp->vlan_devices[vid] = NULL;
8245         tg3_full_unlock(tp);
8246 }
8247 #endif
8248
8249 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8250 {
8251         struct tg3 *tp = netdev_priv(dev);
8252
8253         memcpy(ec, &tp->coal, sizeof(*ec));
8254         return 0;
8255 }
8256
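     /* Validate and apply ethtool coalescing parameters.  On 5705 and newer
      * parts the per-interrupt tick limits and the statistics-block interval
      * stay at zero, so any non-zero request for those fields is rejected
      * with -EINVAL.
      */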
8257 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8258 {
8259         struct tg3 *tp = netdev_priv(dev);
8260         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8261         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8262
8263         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8264                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8265                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8266                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8267                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8268         }
8269
8270         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8271             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8272             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8273             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8274             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8275             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8276             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8277             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8278             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8279             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8280                 return -EINVAL;
8281
8282         /* No rx interrupts will be generated if both are zero */
8283         if ((ec->rx_coalesce_usecs == 0) &&
8284             (ec->rx_max_coalesced_frames == 0))
8285                 return -EINVAL;
8286
8287         /* No tx interrupts will be generated if both are zero */
8288         if ((ec->tx_coalesce_usecs == 0) &&
8289             (ec->tx_max_coalesced_frames == 0))
8290                 return -EINVAL;
8291
8292         /* Only copy relevant parameters, ignore all others. */
8293         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8294         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8295         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8296         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8297         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8298         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8299         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8300         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8301         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8302
8303         if (netif_running(dev)) {
8304                 tg3_full_lock(tp, 0);
8305                 __tg3_set_coalesce(tp, &tp->coal);
8306                 tg3_full_unlock(tp);
8307         }
8308         return 0;
8309 }
8310
8311 static struct ethtool_ops tg3_ethtool_ops = {
8312         .get_settings           = tg3_get_settings,
8313         .set_settings           = tg3_set_settings,
8314         .get_drvinfo            = tg3_get_drvinfo,
8315         .get_regs_len           = tg3_get_regs_len,
8316         .get_regs               = tg3_get_regs,
8317         .get_wol                = tg3_get_wol,
8318         .set_wol                = tg3_set_wol,
8319         .get_msglevel           = tg3_get_msglevel,
8320         .set_msglevel           = tg3_set_msglevel,
8321         .nway_reset             = tg3_nway_reset,
8322         .get_link               = ethtool_op_get_link,
8323         .get_eeprom_len         = tg3_get_eeprom_len,
8324         .get_eeprom             = tg3_get_eeprom,
8325         .set_eeprom             = tg3_set_eeprom,
8326         .get_ringparam          = tg3_get_ringparam,
8327         .set_ringparam          = tg3_set_ringparam,
8328         .get_pauseparam         = tg3_get_pauseparam,
8329         .set_pauseparam         = tg3_set_pauseparam,
8330         .get_rx_csum            = tg3_get_rx_csum,
8331         .set_rx_csum            = tg3_set_rx_csum,
8332         .get_tx_csum            = ethtool_op_get_tx_csum,
8333         .set_tx_csum            = tg3_set_tx_csum,
8334         .get_sg                 = ethtool_op_get_sg,
8335         .set_sg                 = ethtool_op_set_sg,
8336 #if TG3_TSO_SUPPORT != 0
8337         .get_tso                = ethtool_op_get_tso,
8338         .set_tso                = tg3_set_tso,
8339 #endif
8340         .self_test_count        = tg3_get_test_count,
8341         .self_test              = tg3_self_test,
8342         .get_strings            = tg3_get_strings,
8343         .phys_id                = tg3_phys_id,
8344         .get_stats_count        = tg3_get_stats_count,
8345         .get_ethtool_stats      = tg3_get_ethtool_stats,
8346         .get_coalesce           = tg3_get_coalesce,
8347         .set_coalesce           = tg3_set_coalesce,
8348         .get_perm_addr          = ethtool_op_get_perm_addr,
8349 };
8350
8351 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8352 {
8353         u32 cursize, val;
8354
8355         tp->nvram_size = EEPROM_CHIP_SIZE;
8356
8357         if (tg3_nvram_read(tp, 0, &val) != 0)
8358                 return;
8359
8360         if (swab32(val) != TG3_EEPROM_MAGIC)
8361                 return;
8362
8363         /*
8364          * Size the chip by reading offsets at increasing powers of two.
8365          * When we encounter our validation signature, we know the addressing
8366          * has wrapped around, and thus have our chip size.
8367          */
8368         cursize = 0x800;
8369
8370         while (cursize < tp->nvram_size) {
8371                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8372                         return;
8373
8374                 if (swab32(val) == TG3_EEPROM_MAGIC)
8375                         break;
8376
8377                 cursize <<= 1;
8378         }
8379
8380         tp->nvram_size = cursize;
8381 }
8382
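     /* The word at NVRAM offset 0xf0 encodes the device size: its upper 16
      * bits give the size in kilobytes.  Fall back to 128KB if that word
      * reads as zero or cannot be read.
      */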
8383 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8384 {
8385         u32 val;
8386
8387         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8388                 if (val != 0) {
8389                         tp->nvram_size = (val >> 16) * 1024;
8390                         return;
8391                 }
8392         }
8393         tp->nvram_size = 0x20000;
8394 }
8395
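     /* Decode the NVRAM_CFG1 strapping to work out which serial flash or
      * EEPROM part is attached: JEDEC vendor, page size and whether the
      * part is buffered.  5750 and 5780-class chips get the full vendor
      * decode; everything else is assumed to be a buffered Atmel part with
      * AT45DB page geometry.
      */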
8396 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8397 {
8398         u32 nvcfg1;
8399
8400         nvcfg1 = tr32(NVRAM_CFG1);
8401         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8402                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8403         }
8404         else {
8405                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8406                 tw32(NVRAM_CFG1, nvcfg1);
8407         }
8408
8409         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8410             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8411                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8412                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8413                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8414                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8415                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8416                                 break;
8417                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8418                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8419                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8420                                 break;
8421                         case FLASH_VENDOR_ATMEL_EEPROM:
8422                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8423                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8424                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8425                                 break;
8426                         case FLASH_VENDOR_ST:
8427                                 tp->nvram_jedecnum = JEDEC_ST;
8428                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8429                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8430                                 break;
8431                         case FLASH_VENDOR_SAIFUN:
8432                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8433                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8434                                 break;
8435                         case FLASH_VENDOR_SST_SMALL:
8436                         case FLASH_VENDOR_SST_LARGE:
8437                                 tp->nvram_jedecnum = JEDEC_SST;
8438                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8439                                 break;
8440                 }
8441         }
8442         else {
8443                 tp->nvram_jedecnum = JEDEC_ATMEL;
8444                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8445                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8446         }
8447 }
8448
8449 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8450 {
8451         u32 nvcfg1;
8452
8453         nvcfg1 = tr32(NVRAM_CFG1);
8454
8455         /* NVRAM protection for TPM */
8456         if (nvcfg1 & (1 << 27))
8457                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8458
8459         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8460                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8461                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8462                         tp->nvram_jedecnum = JEDEC_ATMEL;
8463                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8464                         break;
8465                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8466                         tp->nvram_jedecnum = JEDEC_ATMEL;
8467                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8468                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8469                         break;
8470                 case FLASH_5752VENDOR_ST_M45PE10:
8471                 case FLASH_5752VENDOR_ST_M45PE20:
8472                 case FLASH_5752VENDOR_ST_M45PE40:
8473                         tp->nvram_jedecnum = JEDEC_ST;
8474                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8475                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8476                         break;
8477         }
8478
8479         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8480                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8481                         case FLASH_5752PAGE_SIZE_256:
8482                                 tp->nvram_pagesize = 256;
8483                                 break;
8484                         case FLASH_5752PAGE_SIZE_512:
8485                                 tp->nvram_pagesize = 512;
8486                                 break;
8487                         case FLASH_5752PAGE_SIZE_1K:
8488                                 tp->nvram_pagesize = 1024;
8489                                 break;
8490                         case FLASH_5752PAGE_SIZE_2K:
8491                                 tp->nvram_pagesize = 2048;
8492                                 break;
8493                         case FLASH_5752PAGE_SIZE_4K:
8494                                 tp->nvram_pagesize = 4096;
8495                                 break;
8496                         case FLASH_5752PAGE_SIZE_264:
8497                                 tp->nvram_pagesize = 264;
8498                                 break;
8499                 }
8500         }
8501         else {
8502                 /* For eeprom, set pagesize to maximum eeprom size */
8503                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8504
8505                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8506                 tw32(NVRAM_CFG1, nvcfg1);
8507         }
8508 }
8509
8510 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8511 static void __devinit tg3_nvram_init(struct tg3 *tp)
8512 {
8513         int j;
8514
8515         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8516                 return;
8517
8518         tw32_f(GRC_EEPROM_ADDR,
8519              (EEPROM_ADDR_FSM_RESET |
8520               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8521                EEPROM_ADDR_CLKPERD_SHIFT)));
8522
8523         /* XXX schedule_timeout() ... */
8524         for (j = 0; j < 100; j++)
8525                 udelay(10);
8526
8527         /* Enable seeprom accesses. */
8528         tw32_f(GRC_LOCAL_CTRL,
8529              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8530         udelay(100);
8531
8532         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8533             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8534                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8535
8536                 tg3_enable_nvram_access(tp);
8537
8538                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8539                         tg3_get_5752_nvram_info(tp);
8540                 else
8541                         tg3_get_nvram_info(tp);
8542
8543                 tg3_get_nvram_size(tp);
8544
8545                 tg3_disable_nvram_access(tp);
8546
8547         } else {
8548                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8549
8550                 tg3_get_eeprom_size(tp);
8551         }
8552 }
8553
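     /* Read one 32-bit word through the legacy serial-EEPROM state machine
      * (used when the chip has no NVRAM interface): program the address,
      * start the cycle and poll GRC_EEPROM_ADDR for the COMPLETE bit before
      * picking the data out of GRC_EEPROM_DATA.
      */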
8554 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8555                                         u32 offset, u32 *val)
8556 {
8557         u32 tmp;
8558         int i;
8559
8560         if (offset > EEPROM_ADDR_ADDR_MASK ||
8561             (offset % 4) != 0)
8562                 return -EINVAL;
8563
8564         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8565                                         EEPROM_ADDR_DEVID_MASK |
8566                                         EEPROM_ADDR_READ);
8567         tw32(GRC_EEPROM_ADDR,
8568              tmp |
8569              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8570              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8571               EEPROM_ADDR_ADDR_MASK) |
8572              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8573
8574         for (i = 0; i < 10000; i++) {
8575                 tmp = tr32(GRC_EEPROM_ADDR);
8576
8577                 if (tmp & EEPROM_ADDR_COMPLETE)
8578                         break;
8579                 udelay(100);
8580         }
8581         if (!(tmp & EEPROM_ADDR_COMPLETE))
8582                 return -EBUSY;
8583
8584         *val = tr32(GRC_EEPROM_DATA);
8585         return 0;
8586 }
8587
8588 #define NVRAM_CMD_TIMEOUT 10000
8589
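     /* Issue a command to the NVRAM interface and poll for NVRAM_CMD_DONE,
      * giving up with -EBUSY after NVRAM_CMD_TIMEOUT polls of 10us each.
      */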
8590 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8591 {
8592         int i;
8593
8594         tw32(NVRAM_CMD, nvram_cmd);
8595         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8596                 udelay(10);
8597                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8598                         udelay(10);
8599                         break;
8600                 }
8601         }
8602         if (i == NVRAM_CMD_TIMEOUT) {
8603                 return -EBUSY;
8604         }
8605         return 0;
8606 }
8607
8608 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8609 {
8610         int ret;
8611
8612         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8613                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8614                 return -EINVAL;
8615         }
8616
8617         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8618                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8619
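             /* Buffered Atmel (AT45DB) flash is not linearly addressed: the
              * page index lives in the upper bits and the byte offset within
              * the page in the lower bits, so translate the flat offset.
              */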
8620         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8621                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8622                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8623
8624                 offset = ((offset / tp->nvram_pagesize) <<
8625                           ATMEL_AT45DB0X1B_PAGE_POS) +
8626                         (offset % tp->nvram_pagesize);
8627         }
8628
8629         if (offset > NVRAM_ADDR_MSK)
8630                 return -EINVAL;
8631
8632         tg3_nvram_lock(tp);
8633
8634         tg3_enable_nvram_access(tp);
8635
8636         tw32(NVRAM_ADDR, offset);
8637         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8638                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8639
8640         if (ret == 0)
8641                 *val = swab32(tr32(NVRAM_RDDATA));
8642
8643         tg3_nvram_unlock(tp);
8644
8645         tg3_disable_nvram_access(tp);
8646
8647         return ret;
8648 }
8649
8650 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8651                                     u32 offset, u32 len, u8 *buf)
8652 {
8653         int i, j, rc = 0;
8654         u32 val;
8655
8656         for (i = 0; i < len; i += 4) {
8657                 u32 addr, data;
8658
8659                 addr = offset + i;
8660
8661                 memcpy(&data, buf + i, 4);
8662
8663                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8664
8665                 val = tr32(GRC_EEPROM_ADDR);
8666                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8667
8668                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8669                         EEPROM_ADDR_READ);
8670                 tw32(GRC_EEPROM_ADDR, val |
8671                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8672                         (addr & EEPROM_ADDR_ADDR_MASK) |
8673                         EEPROM_ADDR_START |
8674                         EEPROM_ADDR_WRITE);
8675
8676                 for (j = 0; j < 10000; j++) {
8677                         val = tr32(GRC_EEPROM_ADDR);
8678
8679                         if (val & EEPROM_ADDR_COMPLETE)
8680                                 break;
8681                         udelay(100);
8682                 }
8683                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8684                         rc = -EBUSY;
8685                         break;
8686                 }
8687         }
8688
8689         return rc;
8690 }
8691
8692 /* offset and length are dword aligned */
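     /* Unbuffered flash has to be rewritten a full page at a time: read the
      * page into a bounce buffer, merge in the new data, issue a write
      * enable, erase the page, then program it back one word at a time.
      */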
8693 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8694                 u8 *buf)
8695 {
8696         int ret = 0;
8697         u32 pagesize = tp->nvram_pagesize;
8698         u32 pagemask = pagesize - 1;
8699         u32 nvram_cmd;
8700         u8 *tmp;
8701
8702         tmp = kmalloc(pagesize, GFP_KERNEL);
8703         if (tmp == NULL)
8704                 return -ENOMEM;
8705
8706         while (len) {
8707                 int j;
8708                 u32 phy_addr, page_off, size;
8709
8710                 phy_addr = offset & ~pagemask;
8711
8712                 for (j = 0; j < pagesize; j += 4) {
8713                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8714                                                 (u32 *) (tmp + j))))
8715                                 break;
8716                 }
8717                 if (ret)
8718                         break;
8719
8720                 page_off = offset & pagemask;
8721                 size = pagesize;
8722                 if (len < size)
8723                         size = len;
8724
8725                 len -= size;
8726
8727                 memcpy(tmp + page_off, buf, size);
8728
8729                 offset = offset + (pagesize - page_off);
8730
8731                 tg3_enable_nvram_access(tp);
8732
8733                 /*
8734                  * Before we can erase the flash page, we need
8735                  * to issue a special "write enable" command.
8736                  */
8737                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8738
8739                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8740                         break;
8741
8742                 /* Erase the target page */
8743                 tw32(NVRAM_ADDR, phy_addr);
8744
8745                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8746                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8747
8748                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8749                         break;
8750
8751                 /* Issue another write enable to start the write. */
8752                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8753
8754                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8755                         break;
8756
8757                 for (j = 0; j < pagesize; j += 4) {
8758                         u32 data;
8759
8760                         data = *((u32 *) (tmp + j));
8761                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8762
8763                         tw32(NVRAM_ADDR, phy_addr + j);
8764
8765                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8766                                 NVRAM_CMD_WR;
8767
8768                         if (j == 0)
8769                                 nvram_cmd |= NVRAM_CMD_FIRST;
8770                         else if (j == (pagesize - 4))
8771                                 nvram_cmd |= NVRAM_CMD_LAST;
8772
8773                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8774                                 break;
8775                 }
8776                 if (ret)
8777                         break;
8778         }
8779
8780         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8781         tg3_nvram_exec_cmd(tp, nvram_cmd);
8782
8783         kfree(tmp);
8784
8785         return ret;
8786 }
8787
8788 /* offset and length are dword aligned */
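     /* Buffered flash and EEPROM can be programmed word by word.  The first
      * and last word of each page are tagged with NVRAM_CMD_FIRST/LAST, ST
      * parts (other than on the 5752) get a write-enable command ahead of
      * each page, and plain EEPROM is always written as complete words.
      */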
8789 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8790                 u8 *buf)
8791 {
8792         int i, ret = 0;
8793
8794         for (i = 0; i < len; i += 4, offset += 4) {
8795                 u32 data, page_off, phy_addr, nvram_cmd;
8796
8797                 memcpy(&data, buf + i, 4);
8798                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8799
8800                 page_off = offset % tp->nvram_pagesize;
8801
8802                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8803                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8804
8805                         phy_addr = ((offset / tp->nvram_pagesize) <<
8806                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8807                 }
8808                 else {
8809                         phy_addr = offset;
8810                 }
8811
8812                 tw32(NVRAM_ADDR, phy_addr);
8813
8814                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8815
8816                 if ((page_off == 0) || (i == 0))
8817                         nvram_cmd |= NVRAM_CMD_FIRST;
8818                 else if (page_off == (tp->nvram_pagesize - 4))
8819                         nvram_cmd |= NVRAM_CMD_LAST;
8820
8821                 if (i == (len - 4))
8822                         nvram_cmd |= NVRAM_CMD_LAST;
8823
8824                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8825                     (tp->nvram_jedecnum == JEDEC_ST) &&
8826                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8827
8828                         if ((ret = tg3_nvram_exec_cmd(tp,
8829                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8830                                 NVRAM_CMD_DONE)))
8831                                 break;
8832
8833                 }
8834                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8835                         /* We always do complete word writes to eeprom. */
8836                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8837                 }
8838
8839                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8840                         break;
8841         }
8842         return ret;
8843 }
8844
8845 /* offset and length are dword aligned */
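     /* Top-level NVRAM write helper: when TG3_FLAG_EEPROM_WRITE_PROT is
      * set, GPIO1 in GRC_LOCAL_CTRL is de-asserted around the operation and
      * restored afterwards, and the buffered or unbuffered routine above is
      * picked to match the attached part.
      */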
8846 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8847 {
8848         int ret;
8849
8850         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8851                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8852                 return -EINVAL;
8853         }
8854
8855         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8856                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8857                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8858                 udelay(40);
8859         }
8860
8861         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8862                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8863         }
8864         else {
8865                 u32 grc_mode;
8866
8867                 tg3_nvram_lock(tp);
8868
8869                 tg3_enable_nvram_access(tp);
8870                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8871                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8872                         tw32(NVRAM_WRITE1, 0x406);
8873
8874                 grc_mode = tr32(GRC_MODE);
8875                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8876
8877                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8878                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8879
8880                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8881                                 buf);
8882                 }
8883                 else {
8884                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8885                                 buf);
8886                 }
8887
8888                 grc_mode = tr32(GRC_MODE);
8889                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8890
8891                 tg3_disable_nvram_access(tp);
8892                 tg3_nvram_unlock(tp);
8893         }
8894
8895         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8896                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8897                 udelay(40);
8898         }
8899
8900         return ret;
8901 }
8902
8903 struct subsys_tbl_ent {
8904         u16 subsys_vendor, subsys_devid;
8905         u32 phy_id;
8906 };
8907
8908 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8909         /* Broadcom boards. */
8910         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8911         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8912         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8913         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8914         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8915         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8916         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8917         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8918         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8919         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8920         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8921
8922         /* 3com boards. */
8923         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8924         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8925         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8926         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8927         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8928
8929         /* DELL boards. */
8930         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8931         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8932         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8933         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8934
8935         /* Compaq boards. */
8936         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8937         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8938         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8939         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8940         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8941
8942         /* IBM boards. */
8943         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8944 };
8945
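     /* Match the board's PCI subsystem vendor/device IDs against the table
      * above to recover a PHY ID when the chip and EEPROM give us nothing.
      */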
8946 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8947 {
8948         int i;
8949
8950         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8951                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8952                      tp->pdev->subsystem_vendor) &&
8953                     (subsys_id_to_phy_id[i].subsys_devid ==
8954                      tp->pdev->subsystem_device))
8955                         return &subsys_id_to_phy_id[i];
8956         }
8957         return NULL;
8958 }
8959
8960 /* Since this function may be called in D3-hot power state during
8961  * tg3_init_one(), only config cycles are allowed.
8962  */
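     /* Pull the bootcode-provided configuration out of NIC SRAM: PHY ID and
      * type (copper vs. SerDes), LED mode, ASF enable, EEPROM write
      * protection, WoL capability and the SerDes pre-emphasis flag.
      */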
8963 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8964 {
8965         u32 val;
8966
8967         /* Make sure register accesses (indirect or otherwise)
8968          * will function correctly.
8969          */
8970         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8971                                tp->misc_host_ctrl);
8972
8973         tp->phy_id = PHY_ID_INVALID;
8974         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8975
8976         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8977         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8978                 u32 nic_cfg, led_cfg;
8979                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8980                 int eeprom_phy_serdes = 0;
8981
8982                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8983                 tp->nic_sram_data_cfg = nic_cfg;
8984
8985                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8986                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8987                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8988                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8989                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8990                     (ver > 0) && (ver < 0x100))
8991                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8992
8993                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8994                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8995                         eeprom_phy_serdes = 1;
8996
8997                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8998                 if (nic_phy_id != 0) {
8999                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9000                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9001
9002                         eeprom_phy_id  = (id1 >> 16) << 10;
9003                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9004                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9005                 } else
9006                         eeprom_phy_id = 0;
9007
9008                 tp->phy_id = eeprom_phy_id;
9009                 if (eeprom_phy_serdes) {
9010                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9011                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9012                         else
9013                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9014                 }
9015
9016                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9017                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9018                                     SHASTA_EXT_LED_MODE_MASK);
9019                 else
9020                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9021
9022                 switch (led_cfg) {
9023                 default:
9024                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9025                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9026                         break;
9027
9028                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9029                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9030                         break;
9031
9032                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9033                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9034
9035                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9036                          * read on some older 5700/5701 bootcode.
9037                          */
9038                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9039                             ASIC_REV_5700 ||
9040                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9041                             ASIC_REV_5701)
9042                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9043
9044                         break;
9045
9046                 case SHASTA_EXT_LED_SHARED:
9047                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9048                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9049                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9050                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9051                                                  LED_CTRL_MODE_PHY_2);
9052                         break;
9053
9054                 case SHASTA_EXT_LED_MAC:
9055                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9056                         break;
9057
9058                 case SHASTA_EXT_LED_COMBO:
9059                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9060                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9061                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9062                                                  LED_CTRL_MODE_PHY_2);
9063                         break;
9064
9065                 }
9066
9067                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9068                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9069                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9070                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9071
9072                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9073                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9074                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9075                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9076
9077                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9078                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9079                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9080                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9081                 }
9082                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9083                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9084
9085                 if (cfg2 & (1 << 17))
9086                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9087
9088                 /* SerDes signal pre-emphasis in register 0x590 is set
9089                  * by the bootcode if bit 18 is set. */
9090                 if (cfg2 & (1 << 18))
9091                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9092         }
9093 }
9094
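     /* Work out which PHY is attached: prefer the ID read over MII, fall
      * back to the EEPROM-provided ID and then to the subsystem-ID table,
      * and, for copper parts without ASF, reset the PHY and restart
      * autonegotiation if it is not already advertising everything the
      * chip supports.
      */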
9095 static int __devinit tg3_phy_probe(struct tg3 *tp)
9096 {
9097         u32 hw_phy_id_1, hw_phy_id_2;
9098         u32 hw_phy_id, hw_phy_id_masked;
9099         int err;
9100
9101         /* Reading the PHY ID register can conflict with ASF
9102          * firmware access to the PHY hardware.
9103          */
9104         err = 0;
9105         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9106                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9107         } else {
9108                 /* Now read the physical PHY_ID from the chip and verify
9109                  * that it is sane.  If it doesn't look good, we fall back
9110                  * to the PHY_ID found in the EEPROM area and, failing
9111                  * that, to the hard-coded subsystem-ID table.
9112                  */
9113                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9114                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9115
9116                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9117                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9118                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9119
9120                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9121         }
9122
9123         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9124                 tp->phy_id = hw_phy_id;
9125                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9126                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9127                 else
9128                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9129         } else {
9130                 if (tp->phy_id != PHY_ID_INVALID) {
9131                         /* Do nothing, phy ID already set up in
9132                          * tg3_get_eeprom_hw_cfg().
9133                          */
9134                 } else {
9135                         struct subsys_tbl_ent *p;
9136
9137                         /* No eeprom signature?  Try the hardcoded
9138                          * subsys device table.
9139                          */
9140                         p = lookup_by_subsys(tp);
9141                         if (!p)
9142                                 return -ENODEV;
9143
9144                         tp->phy_id = p->phy_id;
9145                         if (!tp->phy_id ||
9146                             tp->phy_id == PHY_ID_BCM8002)
9147                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9148                 }
9149         }
9150
9151         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9152             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9153                 u32 bmsr, adv_reg, tg3_ctrl;
9154
9155                 tg3_readphy(tp, MII_BMSR, &bmsr);
9156                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9157                     (bmsr & BMSR_LSTATUS))
9158                         goto skip_phy_reset;
9159
9160                 err = tg3_phy_reset(tp);
9161                 if (err)
9162                         return err;
9163
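                /* Make sure the PHY advertises every 10/100 mode plus
                 * pause and, unless the board is 10/100-only, both
                 * 1000BASE-T modes; 5701 A0/B0 parts are additionally
                 * forced to be the 1000BASE-T master.  Autonegotiation is
                 * restarted if the advertisement had to change.
                 */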
9164                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9165                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9166                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9167                 tg3_ctrl = 0;
9168                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9169                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9170                                     MII_TG3_CTRL_ADV_1000_FULL);
9171                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9172                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9173                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9174                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9175                 }
9176
9177                 if (!tg3_copper_is_advertising_all(tp)) {
9178                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9179
9180                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9181                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9182
9183                         tg3_writephy(tp, MII_BMCR,
9184                                      BMCR_ANENABLE | BMCR_ANRESTART);
9185                 }
9186                 tg3_phy_set_wirespeed(tp);
9187
9188                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9189                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9190                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9191         }
9192
9193 skip_phy_reset:
9194         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9195                 err = tg3_init_5401phy_dsp(tp);
9196                 if (err)
9197                         return err;
9198         }
9199
9200         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9201                 err = tg3_init_5401phy_dsp(tp);
9202         }
9203
9204         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9205                 tp->link_config.advertising =
9206                         (ADVERTISED_1000baseT_Half |
9207                          ADVERTISED_1000baseT_Full |
9208                          ADVERTISED_Autoneg |
9209                          ADVERTISED_FIBRE);
9210         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9211                 tp->link_config.advertising &=
9212                         ~(ADVERTISED_1000baseT_Half |
9213                           ADVERTISED_1000baseT_Full);
9214
9215         return err;
9216 }
9217
9218 static void __devinit tg3_read_partno(struct tg3 *tp)
9219 {
9220         unsigned char vpd_data[256];
9221         int i;
9222
9223         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9224                 /* Sun decided not to put the necessary bits in the
9225                  * NVRAM of their onboard tg3 parts :(
9226                  */
9227                 strcpy(tp->board_part_number, "Sun 570X");
9228                 return;
9229         }
9230
9231         for (i = 0; i < 256; i += 4) {
9232                 u32 tmp;
9233
9234                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9235                         goto out_not_found;
9236
9237                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9238                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9239                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9240                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9241         }
9242
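        /* What was read above is standard PCI VPD.  Resource tags 0x82
         * (identifier string) and 0x91 (read/write data) are skipped; the
         * part number lives in the 0x90 (read-only data) resource as a
         * "PN" keyword: two name bytes, a length byte, then the data.
         */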
9243         /* Now parse and find the part number. */
9244         for (i = 0; i < 256; ) {
9245                 unsigned char val = vpd_data[i];
9246                 int block_end;
9247
9248                 if (val == 0x82 || val == 0x91) {
9249                         i = (i + 3 +
9250                              (vpd_data[i + 1] +
9251                               (vpd_data[i + 2] << 8)));
9252                         continue;
9253                 }
9254
9255                 if (val != 0x90)
9256                         goto out_not_found;
9257
9258                 block_end = (i + 3 +
9259                              (vpd_data[i + 1] +
9260                               (vpd_data[i + 2] << 8)));
9261                 i += 3;
9262                 while (i < block_end) {
9263                         if (vpd_data[i + 0] == 'P' &&
9264                             vpd_data[i + 1] == 'N') {
9265                                 int partno_len = vpd_data[i + 2];
9266
9267                                 if (partno_len > 24)
9268                                         goto out_not_found;
9269
9270                                 memcpy(tp->board_part_number,
9271                                        &vpd_data[i + 3],
9272                                        partno_len);
9273
9274                                 /* Success. */
9275                                 return;
9276                         }
                        /* Advance past this keyword's two-byte name, length
                         * byte and data; without this the scan never
                         * terminates when "PN" is not the first keyword.
                         */
                        i += 3 + vpd_data[i + 2];
9277                 }
9278
9279                 /* Part number not found. */
9280                 goto out_not_found;
9281         }
9282
9283 out_not_found:
9284         strcpy(tp->board_part_number, "none");
9285 }
9286
9287 #ifdef CONFIG_SPARC64
9288 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9289 {
9290         struct pci_dev *pdev = tp->pdev;
9291         struct pcidev_cookie *pcp = pdev->sysdata;
9292
9293         if (pcp != NULL) {
9294                 int node = pcp->prom_node;
9295                 u32 venid;
9296                 int err;
9297
9298                 err = prom_getproperty(node, "subsystem-vendor-id",
9299                                        (char *) &venid, sizeof(venid));
9300                 if (err == 0 || err == -1)
9301                         return 0;
9302                 if (venid == PCI_VENDOR_ID_SUN)
9303                         return 1;
9304         }
9305         return 0;
9306 }
9307 #endif
9308
9309 static int __devinit tg3_get_invariants(struct tg3 *tp)
9310 {
9311         static struct pci_device_id write_reorder_chipsets[] = {
9312                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9313                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9314                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9315                              PCI_DEVICE_ID_VIA_8385_0) },
9316                 { },
9317         };
9318         u32 misc_ctrl_reg;
9319         u32 cacheline_sz_reg;
9320         u32 pci_state_reg, grc_misc_cfg;
9321         u32 val;
9322         u16 pci_cmd;
9323         int err;
9324
9325 #ifdef CONFIG_SPARC64
9326         if (tg3_is_sun_570X(tp))
9327                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9328 #endif
9329
9330         /* Force memory write invalidate off.  If we leave it on,
9331          * then on 5700_BX chips we have to enable a workaround.
9332          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9333          * to match the cacheline size.  The Broadcom driver has this
9334          * workaround but turns MWI off all the time, so it never uses
9335          * it.  This seems to suggest that the workaround is insufficient.
9336          */
9337         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9338         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9339         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9340
9341         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9342          * has the register indirect write enable bit set before
9343          * we try to access any of the MMIO registers.  It is also
9344          * critical that the PCI-X hw workaround situation is decided
9345          * before that as well.
9346          */
9347         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9348                               &misc_ctrl_reg);
9349
9350         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9351                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9352
9353         /* Wrong chip ID in 5752 A0. This code can be removed later
9354          * as A0 is not in production.
9355          */
9356         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9357                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9358
9359         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9360          * we need to disable memory and use config. cycles
9361          * only to access all registers. The 5702/03 chips
9362          * can mistakenly decode the special cycles from the
9363          * ICH chipsets as memory write cycles, causing corruption
9364          * of register and memory space. Only certain ICH bridges
9365          * will drive special cycles with non-zero data during the
9366          * address phase which can fall within the 5703's address
9367          * range. This is not an ICH bug as the PCI spec allows
9368          * non-zero address during special cycles. However, only
9369          * these ICH bridges are known to drive non-zero addresses
9370          * during special cycles.
9371          *
9372          * Since special cycles do not cross PCI bridges, we only
9373          * enable this workaround if the 5703 is on the secondary
9374          * bus of these ICH bridges.
9375          */
9376         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9377             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9378                 static struct tg3_dev_id {
9379                         u32     vendor;
9380                         u32     device;
9381                         u32     rev;
9382                 } ich_chipsets[] = {
9383                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9384                           PCI_ANY_ID },
9385                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9386                           PCI_ANY_ID },
9387                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9388                           0xa },
9389                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9390                           PCI_ANY_ID },
9391                         { },
9392                 };
9393                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9394                 struct pci_dev *bridge = NULL;
9395
9396                 while (pci_id->vendor != 0) {
9397                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9398                                                 bridge);
9399                         if (!bridge) {
9400                                 pci_id++;
9401                                 continue;
9402                         }
9403                         if (pci_id->rev != PCI_ANY_ID) {
9404                                 u8 rev;
9405
9406                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9407                                                      &rev);
9408                                 if (rev > pci_id->rev)
9409                                         continue;
9410                         }
9411                         if (bridge->subordinate &&
9412                             (bridge->subordinate->number ==
9413                              tp->pdev->bus->number)) {
9414
9415                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9416                                 pci_dev_put(bridge);
9417                                 break;
9418                         }
9419                 }
9420         }
9421
9422         /* Find msi capability. */
9423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9424             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9425                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9426                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9427         }
9428
9429         /* Initialize misc host control in PCI block. */
9430         tp->misc_host_ctrl |= (misc_ctrl_reg &
9431                                MISC_HOST_CTRL_CHIPREV);
9432         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9433                                tp->misc_host_ctrl);
9434
9435         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9436                               &cacheline_sz_reg);
9437
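        /* TG3PCI_CACHELINESZ is the standard PCI config dword that packs
         * cache line size, latency timer, header type and BIST into one
         * 32-bit value; split it into the individual fields.
         */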
9438         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9439         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9440         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9441         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9442
9443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9444             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9445             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9446                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9447
9448         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9449             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9450                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9451
9452         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9453                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9454
9455         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9456             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9457             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9458                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9459
9460         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9461                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9462
9463         /* If we have an AMD 762 or VIA K8T800 chipset, write
9464          * reordering to the mailbox registers done by the host
9465          * controller can cause major troubles.  We read back from
9466          * every mailbox register write to force the writes to be
9467          * posted to the chip in order.
9468          */
9469         if (pci_dev_present(write_reorder_chipsets) &&
9470             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9471                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9472
9473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9474             tp->pci_lat_timer < 64) {
9475                 tp->pci_lat_timer = 64;
9476
9477                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9478                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9479                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9480                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9481
9482                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9483                                        cacheline_sz_reg);
9484         }
9485
9486         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9487                               &pci_state_reg);
9488
9489         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9490                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9491
9492                 /* If this is a 5700 BX chipset, and we are in PCI-X
9493                  * mode, enable register write workaround.
9494                  *
9495                  * The workaround is to use indirect register accesses
9496                  * for all chip writes not to mailbox registers.
9497                  */
9498                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9499                         u32 pm_reg;
9500                         u16 pci_cmd;
9501
9502                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9503
9504                         /* The chip can have its power management PCI config
9505                          * space registers clobbered due to this bug.
9506                          * So explicitly force the chip into D0 here.
9507                          */
9508                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9509                                               &pm_reg);
9510                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9511                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9512                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9513                                                pm_reg);
9514
9515                         /* Also, force SERR#/PERR# in PCI command. */
9516                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9517                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9518                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9519                 }
9520         }
9521
9522         /* 5700 BX chips need to have their TX producer index mailboxes
9523          * written twice to workaround a bug.
9524          */
9525         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9526                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9527
9528         /* Back to back register writes can cause problems on this chip,
9529          * the workaround is to read back all reg writes except those to
9530          * mailbox regs.  See tg3_write_indirect_reg32().
9531          *
9532          * PCI Express 5750_A0 rev chips need this workaround too.
9533          */
9534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9535             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9536              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9537                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9538
9539         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9540                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9541         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9542                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9543
9544         /* Chip-specific fixup from Broadcom driver */
9545         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9546             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9547                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9548                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9549         }
9550
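        /* All register and mailbox accesses go through these function
         * pointers, so chip-specific bugs can be handled by swapping in
         * the indirect or read-back-flushing variants selected below.
         */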
9551         /* Default fast path register access methods */
9552         tp->read32 = tg3_read32;
9553         tp->write32 = tg3_write32;
9554         tp->read32_mbox = tg3_read32;
9555         tp->write32_mbox = tg3_write32;
9556         tp->write32_tx_mbox = tg3_write32;
9557         tp->write32_rx_mbox = tg3_write32;
9558
9559         /* Various workaround register access methods */
9560         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9561                 tp->write32 = tg3_write_indirect_reg32;
9562         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9563                 tp->write32 = tg3_write_flush_reg32;
9564
9565         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9566             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9567                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9568                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9569                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9570         }
9571
9572         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9573                 tp->read32 = tg3_read_indirect_reg32;
9574                 tp->write32 = tg3_write_indirect_reg32;
9575                 tp->read32_mbox = tg3_read_indirect_mbox;
9576                 tp->write32_mbox = tg3_write_indirect_mbox;
9577                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9578                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9579
9580                 iounmap(tp->regs);
9581                 tp->regs = NULL;
9582
9583                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9584                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9585                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9586         }
9587
9588         /* Get eeprom hw config before calling tg3_set_power_state().
9589          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9590          * determined before calling tg3_set_power_state() so that
9591          * we know whether or not to switch out of Vaux power.
9592          * When the flag is set, it means that GPIO1 is used for eeprom
9593          * write protect and also implies that it is a LOM where GPIOs
9594          * are not used to switch power.
9595          */ 
9596         tg3_get_eeprom_hw_cfg(tp);
9597
9598         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9599          * GPIO1 driven high will bring 5700's external PHY out of reset.
9600          * It is also used as eeprom write protect on LOMs.
9601          */
9602         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9603         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9604             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9605                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9606                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9607         /* Unused GPIO3 must be driven as output on 5752 because there
9608          * are no pull-up resistors on unused GPIO pins.
9609          */
9610         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9611                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9612
9613         /* Force the chip into D0. */
9614         err = tg3_set_power_state(tp, 0);
9615         if (err) {
9616                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9617                        pci_name(tp->pdev));
9618                 return err;
9619         }
9620
9621         /* 5700 B0 chips do not support checksumming correctly due
9622          * to hardware bugs.
9623          */
9624         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9625                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9626
9627         /* Pseudo-header checksum is done by hardware logic and not
9628          * the offload processors, so make the chip do the pseudo-
9629          * header checksums on receive.  For transmit it is more
9630          * convenient to do the pseudo-header checksum in software
9631          * as Linux does that on transmit for us in all cases.
9632          */
9633         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9634         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9635
9636         /* Derive initial jumbo mode from MTU assigned in
9637          * ether_setup() via the alloc_etherdev() call
9638          */
9639         if (tp->dev->mtu > ETH_DATA_LEN &&
9640             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9641                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9642
9643         /* Determine WakeOnLan speed to use. */
9644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9645             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9646             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9647             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9648                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9649         } else {
9650                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9651         }
9652
9653         /* A few boards don't want Ethernet@WireSpeed phy feature */
9654         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9655             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9656              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9657              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9658             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9659                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9660
9661         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9662             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9663                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9664         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9665                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9666
9667         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9668                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9669
9670         tp->coalesce_mode = 0;
9671         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9672             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9673                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9674
9675         /* Initialize MAC MI mode, polling disabled. */
9676         tw32_f(MAC_MI_MODE, tp->mi_mode);
9677         udelay(80);
9678
9679         /* Initialize data/descriptor byte/word swapping. */
9680         val = tr32(GRC_MODE);
9681         val &= GRC_MODE_HOST_STACKUP;
9682         tw32(GRC_MODE, val | tp->grc_mode);
9683
9684         tg3_switch_clocks(tp);
9685
9686         /* Clear this out for sanity. */
9687         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9688
9689         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9690                               &pci_state_reg);
9691         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9692             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9693                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9694
9695                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9696                     chiprevid == CHIPREV_ID_5701_B0 ||
9697                     chiprevid == CHIPREV_ID_5701_B2 ||
9698                     chiprevid == CHIPREV_ID_5701_B5) {
9699                         void __iomem *sram_base;
9700
9701                         /* Write some dummy words into the SRAM status block
9702                          * area, see if it reads back correctly.  If the return
9703                          * value is bad, force enable the PCIX workaround.
9704                          */
9705                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9706
9707                         writel(0x00000000, sram_base);
9708                         writel(0x00000000, sram_base + 4);
9709                         writel(0xffffffff, sram_base + 4);
9710                         if (readl(sram_base) != 0x00000000)
9711                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9712                 }
9713         }
9714
9715         udelay(50);
9716         tg3_nvram_init(tp);
9717
9718         grc_misc_cfg = tr32(GRC_MISC_CFG);
9719         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9720
9721         /* Broadcom's driver says that CIOBE multisplit has a bug */
9722 #if 0
9723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9724             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9725                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9726                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9727         }
9728 #endif
9729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9730             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9731              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9732                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9733
9734         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9735             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9736                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9737         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9738                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9739                                       HOSTCC_MODE_CLRTICK_TXBD);
9740
9741                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9742                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9743                                        tp->misc_host_ctrl);
9744         }
9745
9746         /* these are limited to 10/100 only */
9747         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9748              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9749             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9750              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9751              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9752               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9753               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9754             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9755              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9756               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9757                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9758
9759         err = tg3_phy_probe(tp);
9760         if (err) {
9761                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9762                        pci_name(tp->pdev), err);
9763                 /* ... but do not return immediately ... */
9764         }
9765
9766         tg3_read_partno(tp);
9767
9768         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9769                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9770         } else {
9771                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9772                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9773                 else
9774                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9775         }
9776
9777         /* 5700 {AX,BX} chips have a broken status block link
9778          * change bit implementation, so we must use the
9779          * status register in those cases.
9780          */
9781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9782                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9783         else
9784                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9785
9786         /* The led_ctrl is set during tg3_phy_probe; here we might
9787          * have to force the link status polling mechanism based
9788          * upon subsystem IDs.
9789          */
9790         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9791             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9792                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9793                                   TG3_FLAG_USE_LINKCHG_REG);
9794         }
9795
9796         /* For all SERDES we poll the MAC status register. */
9797         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9798                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9799         else
9800                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9801
9802         /* It seems all chips can get confused if TX buffers
9803          * straddle the 4GB address boundary in some cases.
9804          */
9805         tp->dev->hard_start_xmit = tg3_start_xmit;
9806
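        /* Reserve two bytes in front of received frames so the IP header
         * lands on a four-byte boundary.  The 5701 in PCI-X mode
         * apparently cannot DMA to such an unaligned address, so no
         * offset is used there.
         */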
9807         tp->rx_offset = 2;
9808         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9809             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9810                 tp->rx_offset = 0;
9811
9812         /* By default, disable wake-on-lan.  User can change this
9813          * using ETHTOOL_SWOL.
9814          */
9815         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9816
9817         return err;
9818 }
9819
9820 #ifdef CONFIG_SPARC64
9821 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9822 {
9823         struct net_device *dev = tp->dev;
9824         struct pci_dev *pdev = tp->pdev;
9825         struct pcidev_cookie *pcp = pdev->sysdata;
9826
9827         if (pcp != NULL) {
9828                 int node = pcp->prom_node;
9829
9830                 if (prom_getproplen(node, "local-mac-address") == 6) {
9831                         prom_getproperty(node, "local-mac-address",
9832                                          dev->dev_addr, 6);
9833                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9834                         return 0;
9835                 }
9836         }
9837         return -ENODEV;
9838 }
9839
9840 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9841 {
9842         struct net_device *dev = tp->dev;
9843
9844         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9845         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9846         return 0;
9847 }
9848 #endif
9849
9850 static int __devinit tg3_get_device_address(struct tg3 *tp)
9851 {
9852         struct net_device *dev = tp->dev;
9853         u32 hi, lo, mac_offset;
9854
9855 #ifdef CONFIG_SPARC64
9856         if (!tg3_get_macaddr_sparc(tp))
9857                 return 0;
9858 #endif
9859
9860         mac_offset = 0x7c;
9861         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9862              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9863             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9864                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9865                         mac_offset = 0xcc;
9866                 if (tg3_nvram_lock(tp))
9867                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9868                 else
9869                         tg3_nvram_unlock(tp);
9870         }
9871
9872         /* First try to get it from MAC address mailbox. */
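        /* An upper half of 0x484b (ASCII "HK") appears to be the
         * firmware's signature that a valid address was stored there.
         */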
9873         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9874         if ((hi >> 16) == 0x484b) {
9875                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9876                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9877
9878                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9879                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9880                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9881                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9882                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9883         }
9884         /* Next, try NVRAM. */
9885         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9886                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9887                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9888                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9889                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9890                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9891                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9892                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9893                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9894         }
9895         /* Finally just fetch it out of the MAC control regs. */
9896         else {
9897                 hi = tr32(MAC_ADDR_0_HIGH);
9898                 lo = tr32(MAC_ADDR_0_LOW);
9899
9900                 dev->dev_addr[5] = lo & 0xff;
9901                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9902                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9903                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9904                 dev->dev_addr[1] = hi & 0xff;
9905                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9906         }
9907
9908         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9909 #ifdef CONFIG_SPARC64
9910                 if (!tg3_get_default_macaddr_sparc(tp))
9911                         return 0;
9912 #endif
9913                 return -EINVAL;
9914         }
9915         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9916         return 0;
9917 }
9918
9919 #define BOUNDARY_SINGLE_CACHELINE       1
9920 #define BOUNDARY_MULTI_CACHELINE        2
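/* Goal values for tg3_calc_dma_bndry(): SINGLE_CACHELINE requests a DMA
 * burst boundary that roughly matches the host cache line size, while
 * MULTI_CACHELINE allows bursts spanning several cache lines.
 */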
9921
9922 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9923 {
9924         int cacheline_size;
9925         u8 byte;
9926         int goal;
9927
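        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
         * multiply by four below; a value of zero means the size was
         * never programmed, so the worst case of 1024 bytes is assumed.
         */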
9928         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9929         if (byte == 0)
9930                 cacheline_size = 1024;
9931         else
9932                 cacheline_size = (int) byte * 4;
9933
9934         /* On 5703 and later chips, the boundary bits have no
9935          * effect.
9936          */
9937         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9938             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9939             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9940                 goto out;
9941
9942 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9943         goal = BOUNDARY_MULTI_CACHELINE;
9944 #else
9945 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9946         goal = BOUNDARY_SINGLE_CACHELINE;
9947 #else
9948         goal = 0;
9949 #endif
9950 #endif
9951
9952         if (!goal)
9953                 goto out;
9954
9955         /* PCI controllers on most RISC systems tend to disconnect
9956          * when a device tries to burst across a cache-line boundary.
9957          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9958          *
9959          * Unfortunately, for PCI-E there are only limited
9960          * write-side controls for this, and thus for reads
9961          * we will still get the disconnects.  We'll also waste
9962          * these PCI cycles for both read and write for chips
9963          * other than 5700 and 5701 which do not implement the
9964          * boundary bits.
9965          */
9966         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9967             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9968                 switch (cacheline_size) {
9969                 case 16:
9970                 case 32:
9971                 case 64:
9972                 case 128:
9973                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9974                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9975                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9976                         } else {
9977                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9978                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9979                         }
9980                         break;
9981
9982                 case 256:
9983                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9984                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9985                         break;
9986
9987                 default:
9988                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9989                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9990                         break;
9991                 };
9992         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9993                 switch (cacheline_size) {
9994                 case 16:
9995                 case 32:
9996                 case 64:
9997                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9998                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9999                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10000                                 break;
10001                         }
10002                         /* fallthrough */
10003                 case 128:
10004                 default:
10005                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10006                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10007                         break;
10008                 };
10009         } else {
10010                 switch (cacheline_size) {
10011                 case 16:
10012                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10013                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10014                                         DMA_RWCTRL_WRITE_BNDRY_16);
10015                                 break;
10016                         }
10017                         /* fallthrough */
10018                 case 32:
10019                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10020                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10021                                         DMA_RWCTRL_WRITE_BNDRY_32);
10022                                 break;
10023                         }
10024                         /* fallthrough */
10025                 case 64:
10026                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10027                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10028                                         DMA_RWCTRL_WRITE_BNDRY_64);
10029                                 break;
10030                         }
10031                         /* fallthrough */
10032                 case 128:
10033                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10034                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10035                                         DMA_RWCTRL_WRITE_BNDRY_128);
10036                                 break;
10037                         }
10038                         /* fallthrough */
10039                 case 256:
10040                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10041                                 DMA_RWCTRL_WRITE_BNDRY_256);
10042                         break;
10043                 case 512:
10044                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10045                                 DMA_RWCTRL_WRITE_BNDRY_512);
10046                         break;
10047                 case 1024:
10048                 default:
10049                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10050                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10051                         break;
10052                 };
10053         }
10054
10055 out:
10056         return val;
10057 }
10058
10059 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10060 {
10061         struct tg3_internal_buffer_desc test_desc;
10062         u32 sram_dma_descs;
10063         int i, ret;
10064
10065         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10066
10067         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10068         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10069         tw32(RDMAC_STATUS, 0);
10070         tw32(WDMAC_STATUS, 0);
10071
10072         tw32(BUFMGR_MODE, 0);
10073         tw32(FTQ_RESET, 0);
10074
10075         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10076         test_desc.addr_lo = buf_dma & 0xffffffff;
10077         test_desc.nic_mbuf = 0x00002100;
10078         test_desc.len = size;
10079
10080         /*
10081          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10082          * the *second* time the tg3 driver was getting loaded after an
10083          * initial scan.
10084          *
10085          * Broadcom tells me:
10086          *   ...the DMA engine is connected to the GRC block and a DMA
10087          *   reset may affect the GRC block in some unpredictable way...
10088          *   The behavior of resets to individual blocks has not been tested.
10089          *
10090          * Broadcom noted the GRC reset will also reset all sub-components.
10091          */
10092         if (to_device) {
10093                 test_desc.cqid_sqid = (13 << 8) | 2;
10094
10095                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10096                 udelay(40);
10097         } else {
10098                 test_desc.cqid_sqid = (16 << 8) | 7;
10099
10100                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10101                 udelay(40);
10102         }
10103         test_desc.flags = 0x00000005;
10104
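        /* Copy the descriptor into NIC SRAM one word at a time through
         * the PCI memory window registers, then close the window again.
         */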
10105         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10106                 u32 val;
10107
10108                 val = *(((u32 *)&test_desc) + i);
10109                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10110                                        sram_dma_descs + (i * sizeof(u32)));
10111                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10112         }
10113         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10114
10115         if (to_device) {
10116                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10117         } else {
10118                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10119         }
10120
10121         ret = -ENODEV;
10122         for (i = 0; i < 40; i++) {
10123                 u32 val;
10124
10125                 if (to_device)
10126                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10127                 else
10128                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10129                 if ((val & 0xffff) == sram_dma_descs) {
10130                         ret = 0;
10131                         break;
10132                 }
10133
10134                 udelay(100);
10135         }
10136
10137         return ret;
10138 }
10139
10140 #define TEST_BUFFER_SIZE        0x2000
10141
10142 static int __devinit tg3_test_dma(struct tg3 *tp)
10143 {
10144         dma_addr_t buf_dma;
10145         u32 *buf, saved_dma_rwctrl;
10146         int ret;
10147
10148         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10149         if (!buf) {
10150                 ret = -ENOMEM;
10151                 goto out_nofree;
10152         }
10153
10154         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10155                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10156
10157         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10158
10159         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10160                 /* DMA read watermark not used on PCIE */
10161                 tp->dma_rwctrl |= 0x00180000;
10162         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10163                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10164                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10165                         tp->dma_rwctrl |= 0x003f0000;
10166                 else
10167                         tp->dma_rwctrl |= 0x003f000f;
10168         } else {
10169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10170                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10171                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10172
10173                         if (ccval == 0x6 || ccval == 0x7)
10174                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10175
10176                         /* Set bit 23 to enable PCIX hw bug fix */
10177                         tp->dma_rwctrl |= 0x009f0000;
10178                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10179                         /* 5780 always in PCIX mode */
10180                         tp->dma_rwctrl |= 0x00144000;
10181                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10182                         /* 5714 always in PCIX mode */
10183                         tp->dma_rwctrl |= 0x00148000;
10184                 } else {
10185                         tp->dma_rwctrl |= 0x001b000f;
10186                 }
10187         }
10188
10189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10191                 tp->dma_rwctrl &= 0xfffffff0;
10192
10193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10194             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10195                 /* Remove this if it causes problems for some boards. */
10196                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10197
10198                 /* On 5700/5701 chips, we need to set this bit.
10199                  * Otherwise the chip will issue cacheline transactions
10200                  * to streamable DMA memory with not all the byte
10201                  * enables turned on.  This is an error on several
10202                  * RISC PCI controllers, in particular sparc64.
10203                  *
10204                  * On 5703/5704 chips, this bit has been reassigned
10205                  * a different meaning.  In particular, it is used
10206                  * on those chips to enable a PCI-X workaround.
10207                  */
10208                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10209         }
10210
10211         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10212
10213 #if 0
10214         /* Unneeded, already done by tg3_get_invariants.  */
10215         tg3_switch_clocks(tp);
10216 #endif
10217
10218         ret = 0;
10219         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10220             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10221                 goto out;
10222
10223         /* It is best to perform DMA test with maximum write burst size
10224          * to expose the 5700/5701 write DMA bug.
10225          */
10226         saved_dma_rwctrl = tp->dma_rwctrl;
10227         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10228         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10229
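        /* Fill the buffer with a counting pattern, DMA it to the chip
         * and back, then verify it.  On a mismatch the write boundary is
         * dropped to 16 bytes and the test retried once before the DMA
         * engine is declared broken.
         */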
10230         while (1) {
10231                 u32 *p = buf, i;
10232
10233                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10234                         p[i] = i;
10235
10236                 /* Send the buffer to the chip. */
10237                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10238                 if (ret) {
10239                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10240                         break;
10241                 }
10242
10243 #if 0
10244                 /* validate data reached card RAM correctly. */
10245                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10246                         u32 val;
10247                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10248                         if (le32_to_cpu(val) != p[i]) {
10249                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10250                                 /* ret = -ENODEV here? */
10251                         }
10252                         p[i] = 0;
10253                 }
10254 #endif
10255                 /* Now read it back. */
10256                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10257                 if (ret) {
10258                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10259
10260                         break;
10261                 }
10262
10263                 /* Verify it. */
10264                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10265                         if (p[i] == i)
10266                                 continue;
10267
10268                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10269                             DMA_RWCTRL_WRITE_BNDRY_16) {
10270                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10271                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10272                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10273                                 break;
10274                         } else {
10275                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10276                                 ret = -ENODEV;
10277                                 goto out;
10278                         }
10279                 }
10280
10281                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10282                         /* Success. */
10283                         ret = 0;
10284                         break;
10285                 }
10286         }
10287         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10288             DMA_RWCTRL_WRITE_BNDRY_16) {
10289                 static struct pci_device_id dma_wait_state_chipsets[] = {
10290                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10291                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10292                         { },
10293                 };
10294
10295                 /* DMA test passed without adjusting the DMA boundary;
10296                  * now look for chipsets that are known to expose the
10297                  * DMA bug without failing the test.
10298                  */
10299                 if (pci_dev_present(dma_wait_state_chipsets)) {
10300                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10301                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10302                 }
10303                 else
10304                         /* Safe to use the calculated DMA boundary. */
10305                         tp->dma_rwctrl = saved_dma_rwctrl;
10306
10307                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10308         }
10309
10310 out:
10311         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10312 out_nofree:
10313         return ret;
10314 }
10315
10316 static void __devinit tg3_init_link_config(struct tg3 *tp)
10317 {
10318         tp->link_config.advertising =
10319                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10320                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10321                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10322                  ADVERTISED_Autoneg | ADVERTISED_MII);
10323         tp->link_config.speed = SPEED_INVALID;
10324         tp->link_config.duplex = DUPLEX_INVALID;
10325         tp->link_config.autoneg = AUTONEG_ENABLE;
10326         netif_carrier_off(tp->dev);
10327         tp->link_config.active_speed = SPEED_INVALID;
10328         tp->link_config.active_duplex = DUPLEX_INVALID;
10329         tp->link_config.phy_is_low_power = 0;
10330         tp->link_config.orig_speed = SPEED_INVALID;
10331         tp->link_config.orig_duplex = DUPLEX_INVALID;
10332         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10333 }
10334
10335 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10336 {
10337         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10338                 tp->bufmgr_config.mbuf_read_dma_low_water =
10339                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10340                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10341                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10342                 tp->bufmgr_config.mbuf_high_water =
10343                         DEFAULT_MB_HIGH_WATER_5705;
10344
10345                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10346                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10347                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10348                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10349                 tp->bufmgr_config.mbuf_high_water_jumbo =
10350                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10351         } else {
10352                 tp->bufmgr_config.mbuf_read_dma_low_water =
10353                         DEFAULT_MB_RDMA_LOW_WATER;
10354                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10355                         DEFAULT_MB_MACRX_LOW_WATER;
10356                 tp->bufmgr_config.mbuf_high_water =
10357                         DEFAULT_MB_HIGH_WATER;
10358
10359                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10360                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10361                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10362                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10363                 tp->bufmgr_config.mbuf_high_water_jumbo =
10364                         DEFAULT_MB_HIGH_WATER_JUMBO;
10365         }
10366
10367         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10368         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10369 }
10370
10371 static char * __devinit tg3_phy_string(struct tg3 *tp)
10372 {
10373         switch (tp->phy_id & PHY_ID_MASK) {
10374         case PHY_ID_BCM5400:    return "5400";
10375         case PHY_ID_BCM5401:    return "5401";
10376         case PHY_ID_BCM5411:    return "5411";
10377         case PHY_ID_BCM5701:    return "5701";
10378         case PHY_ID_BCM5703:    return "5703";
10379         case PHY_ID_BCM5704:    return "5704";
10380         case PHY_ID_BCM5705:    return "5705";
10381         case PHY_ID_BCM5750:    return "5750";
10382         case PHY_ID_BCM5752:    return "5752";
10383         case PHY_ID_BCM5714:    return "5714";
10384         case PHY_ID_BCM5780:    return "5780";
10385         case PHY_ID_BCM8002:    return "8002/serdes";
10386         case 0:                 return "serdes";
10387         default:                return "unknown";
10388         }
10389 }
10390
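/* Describe the bus the NIC sits on for the probe banner: PCI, PCI-X or
 * PCI Express, plus the bus clock and width.  For PCI-X the clock is
 * decoded from TG3PCI_CLOCK_CTRL.  Typical results look like
 * "PCIX:133MHz:64-bit" or "PCI:33MHz:32-bit".
 */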
10391 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10392 {
10393         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10394                 strcpy(str, "PCI Express");
10395                 return str;
10396         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10397                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10398
10399                 strcpy(str, "PCIX:");
10400
10401                 if ((clock_ctrl == 7) ||
10402                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10403                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10404                         strcat(str, "133MHz");
10405                 else if (clock_ctrl == 0)
10406                         strcat(str, "33MHz");
10407                 else if (clock_ctrl == 2)
10408                         strcat(str, "50MHz");
10409                 else if (clock_ctrl == 4)
10410                         strcat(str, "66MHz");
10411                 else if (clock_ctrl == 6)
10412                         strcat(str, "100MHz");
10415         } else {
10416                 strcpy(str, "PCI:");
10417                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10418                         strcat(str, "66MHz");
10419                 else
10420                         strcat(str, "33MHz");
10421         }
10422         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10423                 strcat(str, ":32-bit");
10424         else
10425                 strcat(str, ":64-bit");
10426         return str;
10427 }
10428
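/* The 5704 is a dual-port chip whose two MACs appear as separate PCI
 * functions on the same device number.  Locate the sibling function so
 * the two ports can find each other through tp->pdev_peer.
 */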
10429 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10430 {
10431         struct pci_dev *peer;
10432         unsigned int func, devnr = tp->pdev->devfn & ~7;
10433
10434         for (func = 0; func < 8; func++) {
10435                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10436                 if (peer && peer != tp->pdev)
10437                         break;
10438                 pci_dev_put(peer);
10439         }
10440         BUG_ON(!peer || peer == tp->pdev);
10442
10443         /*
10444          * We don't need to keep the refcount elevated; there's no way
10445          * to remove one half of this device without removing the other
10446          */
10447         pci_dev_put(peer);
10448
10449         return peer;
10450 }
10451
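/* Fill in the default interrupt coalescing parameters reported through
 * ethtool.  Chips using the CLRTICK host-coalescing modes get adjusted
 * tick values, and on 5705-class chips the per-interrupt and statistics
 * coalescing values are forced to zero.
 */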
10452 static void __devinit tg3_init_coal(struct tg3 *tp)
10453 {
10454         struct ethtool_coalesce *ec = &tp->coal;
10455
10456         memset(ec, 0, sizeof(*ec));
10457         ec->cmd = ETHTOOL_GCOALESCE;
10458         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10459         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10460         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10461         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10462         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10463         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10464         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10465         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10466         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10467
10468         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10469                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10470                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10471                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10472                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10473                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10474         }
10475
10476         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10477                 ec->rx_coalesce_usecs_irq = 0;
10478                 ec->tx_coalesce_usecs_irq = 0;
10479                 ec->stats_block_coalesce_usecs = 0;
10480         }
10481 }
10482
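/* PCI probe entry point: enable the device, map the register BAR, set up
 * the DMA masks, read the chip invariants and MAC address, test the DMA
 * engine, and finally register the net_device.  Errors unwind through the
 * err_out_* labels in reverse order of setup.
 */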
10483 static int __devinit tg3_init_one(struct pci_dev *pdev,
10484                                   const struct pci_device_id *ent)
10485 {
10486         static int tg3_version_printed = 0;
10487         unsigned long tg3reg_base, tg3reg_len;
10488         struct net_device *dev;
10489         struct tg3 *tp;
10490         int i, err, pci_using_dac, pm_cap;
10491         char str[40];
10492
10493         if (tg3_version_printed++ == 0)
10494                 printk(KERN_INFO "%s", version);
10495
10496         err = pci_enable_device(pdev);
10497         if (err) {
10498                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10499                        "aborting.\n");
10500                 return err;
10501         }
10502
10503         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10504                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10505                        "base address, aborting.\n");
10506                 err = -ENODEV;
10507                 goto err_out_disable_pdev;
10508         }
10509
10510         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10511         if (err) {
10512                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10513                        "aborting.\n");
10514                 goto err_out_disable_pdev;
10515         }
10516
10517         pci_set_master(pdev);
10518
10519         /* Find power-management capability. */
10520         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10521         if (pm_cap == 0) {
10522                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
10523                        "aborting.\n");
10524                 err = -EIO;
10525                 goto err_out_free_res;
10526         }
10527
10528         /* Configure DMA attributes. */
10529         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10530         if (!err) {
10531                 pci_using_dac = 1;
10532                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10533                 if (err < 0) {
10534                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10535                                "for consistent allocations\n");
10536                         goto err_out_free_res;
10537                 }
10538         } else {
10539                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10540                 if (err) {
10541                         printk(KERN_ERR PFX "No usable DMA configuration, "
10542                                "aborting.\n");
10543                         goto err_out_free_res;
10544                 }
10545                 pci_using_dac = 0;
10546         }
10547
10548         tg3reg_base = pci_resource_start(pdev, 0);
10549         tg3reg_len = pci_resource_len(pdev, 0);
10550
10551         dev = alloc_etherdev(sizeof(*tp));
10552         if (!dev) {
10553                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10554                 err = -ENOMEM;
10555                 goto err_out_free_res;
10556         }
10557
10558         SET_MODULE_OWNER(dev);
10559         SET_NETDEV_DEV(dev, &pdev->dev);
10560
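        /* Only advertise high-DMA support if we were granted a 64-bit DMA
         * mask above; 5788-class parts clear this again further down.
         */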
10561         if (pci_using_dac)
10562                 dev->features |= NETIF_F_HIGHDMA;
10563         dev->features |= NETIF_F_LLTX;
10564 #if TG3_VLAN_TAG_USED
10565         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10566         dev->vlan_rx_register = tg3_vlan_rx_register;
10567         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10568 #endif
10569
10570         tp = netdev_priv(dev);
10571         tp->pdev = pdev;
10572         tp->dev = dev;
10573         tp->pm_cap = pm_cap;
10574         tp->mac_mode = TG3_DEF_MAC_MODE;
10575         tp->rx_mode = TG3_DEF_RX_MODE;
10576         tp->tx_mode = TG3_DEF_TX_MODE;
10577         tp->mi_mode = MAC_MI_MODE_BASE;
10578         if (tg3_debug > 0)
10579                 tp->msg_enable = tg3_debug;
10580         else
10581                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10582
10583         /* The word/byte swap controls here control register access byte
10584          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10585          * setting below.
10586          */
10587         tp->misc_host_ctrl =
10588                 MISC_HOST_CTRL_MASK_PCI_INT |
10589                 MISC_HOST_CTRL_WORD_SWAP |
10590                 MISC_HOST_CTRL_INDIR_ACCESS |
10591                 MISC_HOST_CTRL_PCISTATE_RW;
10592
10593         /* The NONFRM (non-frame) byte/word swap controls take effect
10594          * on descriptor entries, anything which isn't packet data.
10595          *
10596          * The StrongARM chips on the board (one for tx, one for rx)
10597          * are running in big-endian mode.
10598          */
10599         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10600                         GRC_MODE_WSWAP_NONFRM_DATA);
10601 #ifdef __BIG_ENDIAN
10602         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10603 #endif
10604         spin_lock_init(&tp->lock);
10605         spin_lock_init(&tp->tx_lock);
10606         spin_lock_init(&tp->indirect_lock);
10607         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10608
10609         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10610         if (!tp->regs) {
10611                 printk(KERN_ERR PFX "Cannot map device registers, "
10612                        "aborting.\n");
10613                 err = -ENOMEM;
10614                 goto err_out_free_dev;
10615         }
10616
10617         tg3_init_link_config(tp);
10618
10619         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10620         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10621         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10622
10623         dev->open = tg3_open;
10624         dev->stop = tg3_close;
10625         dev->get_stats = tg3_get_stats;
10626         dev->set_multicast_list = tg3_set_rx_mode;
10627         dev->set_mac_address = tg3_set_mac_addr;
10628         dev->do_ioctl = tg3_ioctl;
10629         dev->tx_timeout = tg3_tx_timeout;
10630         dev->poll = tg3_poll;
10631         dev->ethtool_ops = &tg3_ethtool_ops;
10632         dev->weight = 64;
10633         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10634         dev->change_mtu = tg3_change_mtu;
10635         dev->irq = pdev->irq;
10636 #ifdef CONFIG_NET_POLL_CONTROLLER
10637         dev->poll_controller = tg3_poll_controller;
10638 #endif
10639
10640         err = tg3_get_invariants(tp);
10641         if (err) {
10642                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10643                        "aborting.\n");
10644                 goto err_out_iounmap;
10645         }
10646
10647         tg3_init_bufmgr_config(tp);
10648
10649 #if TG3_TSO_SUPPORT != 0
10650         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10651                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10652         }
10653         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10654             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10655             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10656             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10657                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10658         } else {
10659                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10660         }
10661
10662         /* TSO is off by default, user can enable using ethtool.  */
10663 #if 0
10664         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10665                 dev->features |= NETIF_F_TSO;
10666 #endif
10667
10668 #endif
10669
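        /* 5705 A1 without TSO on a bus that is not running at high speed:
         * limit the standard RX ring to 64 pending buffers.
         */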
10670         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10671             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10672             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10673                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10674                 tp->rx_pending = 63;
10675         }
10676
10677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10678                 tp->pdev_peer = tg3_find_5704_peer(tp);
10679
10680         err = tg3_get_device_address(tp);
10681         if (err) {
10682                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10683                        "aborting.\n");
10684                 goto err_out_iounmap;
10685         }
10686
10687         /*
10688          * Reset the chip in case the UNDI or EFI driver did not shut it
10689          * down cleanly; otherwise the DMA self test will enable WDMAC and
10690          * we'll see (spurious) pending DMA on the PCI bus at that point.
10691          */
10692         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10693             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10694                 pci_save_state(tp->pdev);
10695                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10696                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10697         }
10698
10699         err = tg3_test_dma(tp);
10700         if (err) {
10701                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10702                 goto err_out_iounmap;
10703         }
10704
10705         /* Tigon3 can only checksum IPv4 packets in hardware, and some
10706          * chips have buggy checksumming.
10707          */
10708         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10709                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10710                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10711         } else
10712                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10713
10714         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10715                 dev->features &= ~NETIF_F_HIGHDMA;
10716
10717         /* flow control autonegotiation is default behavior */
10718         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10719
10720         tg3_init_coal(tp);
10721
10722         /* Now that we have fully setup the chip, save away a snapshot
10723          * of the PCI config space.  We need to restore this after
10724          * GRC_MISC_CFG core clock resets and some resume events.
10725          */
10726         pci_save_state(tp->pdev);
10727
10728         err = register_netdev(dev);
10729         if (err) {
10730                 printk(KERN_ERR PFX "Cannot register net device, "
10731                        "aborting.\n");
10732                 goto err_out_iounmap;
10733         }
10734
10735         pci_set_drvdata(pdev, dev);
10736
10737         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10738                dev->name,
10739                tp->board_part_number,
10740                tp->pci_chip_rev_id,
10741                tg3_phy_string(tp),
10742                tg3_bus_string(tp, str),
10743                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10744
10745         for (i = 0; i < 6; i++)
10746                 printk("%2.2x%c", dev->dev_addr[i],
10747                        i == 5 ? '\n' : ':');
10748
10749         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10750                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10751                "TSOcap[%d]\n",
10752                dev->name,
10753                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10754                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10755                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10756                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10757                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10758                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10759                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10760         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10761                dev->name, tp->dma_rwctrl);
10762
10763         return 0;
10764
10765 err_out_iounmap:
10766         if (tp->regs) {
10767                 iounmap(tp->regs);
10768                 tp->regs = NULL;
10769         }
10770
10771 err_out_free_dev:
10772         free_netdev(dev);
10773
10774 err_out_free_res:
10775         pci_release_regions(pdev);
10776
10777 err_out_disable_pdev:
10778         pci_disable_device(pdev);
10779         pci_set_drvdata(pdev, NULL);
10780         return err;
10781 }
10782
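/* PCI remove: undo everything tg3_init_one() set up.  Unregister the
 * net_device, unmap the registers, then release and disable the PCI
 * device.
 */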
10783 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10784 {
10785         struct net_device *dev = pci_get_drvdata(pdev);
10786
10787         if (dev) {
10788                 struct tg3 *tp = netdev_priv(dev);
10789
10790                 unregister_netdev(dev);
10791                 if (tp->regs) {
10792                         iounmap(tp->regs);
10793                         tp->regs = NULL;
10794                 }
10795                 free_netdev(dev);
10796                 pci_release_regions(pdev);
10797                 pci_disable_device(pdev);
10798                 pci_set_drvdata(pdev, NULL);
10799         }
10800 }
10801
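/* Power management hooks.  Suspend quiesces the interface, halts the chip
 * and drops it into the requested low-power state; if that fails, the
 * hardware is re-initialized so the device stays usable.  Resume restores
 * the PCI config space, returns the chip to full power and re-runs the
 * hardware init before restarting the interface.
 */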
10802 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10803 {
10804         struct net_device *dev = pci_get_drvdata(pdev);
10805         struct tg3 *tp = netdev_priv(dev);
10806         int err;
10807
10808         if (!netif_running(dev))
10809                 return 0;
10810
10811         tg3_netif_stop(tp);
10812
10813         del_timer_sync(&tp->timer);
10814
10815         tg3_full_lock(tp, 1);
10816         tg3_disable_ints(tp);
10817         tg3_full_unlock(tp);
10818
10819         netif_device_detach(dev);
10820
10821         tg3_full_lock(tp, 0);
10822         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10823         tg3_full_unlock(tp);
10824
10825         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10826         if (err) {
10827                 tg3_full_lock(tp, 0);
10828
10829                 tg3_init_hw(tp);
10830
10831                 tp->timer.expires = jiffies + tp->timer_offset;
10832                 add_timer(&tp->timer);
10833
10834                 netif_device_attach(dev);
10835                 tg3_netif_start(tp);
10836
10837                 tg3_full_unlock(tp);
10838         }
10839
10840         return err;
10841 }
10842
10843 static int tg3_resume(struct pci_dev *pdev)
10844 {
10845         struct net_device *dev = pci_get_drvdata(pdev);
10846         struct tg3 *tp = netdev_priv(dev);
10847         int err;
10848
10849         if (!netif_running(dev))
10850                 return 0;
10851
10852         pci_restore_state(tp->pdev);
10853
10854         err = tg3_set_power_state(tp, 0);
10855         if (err)
10856                 return err;
10857
10858         netif_device_attach(dev);
10859
10860         tg3_full_lock(tp, 0);
10861
10862         tg3_init_hw(tp);
10863
10864         tp->timer.expires = jiffies + tp->timer_offset;
10865         add_timer(&tp->timer);
10866
10867         tg3_netif_start(tp);
10868
10869         tg3_full_unlock(tp);
10870
10871         return 0;
10872 }
10873
10874 static struct pci_driver tg3_driver = {
10875         .name           = DRV_MODULE_NAME,
10876         .id_table       = tg3_pci_tbl,
10877         .probe          = tg3_init_one,
10878         .remove         = __devexit_p(tg3_remove_one),
10879         .suspend        = tg3_suspend,
10880         .resume         = tg3_resume
10881 };
10882
10883 static int __init tg3_init(void)
10884 {
10885         return pci_module_init(&tg3_driver);
10886 }
10887
10888 static void __exit tg3_cleanup(void)
10889 {
10890         pci_unregister_driver(&tg3_driver);
10891 }
10892
10893 module_init(tg3_init);
10894 module_exit(tg3_cleanup);