[linux-2.6] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.82"
68 #define DRV_MODULE_RELDATE      "October 5, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs this driver binds to; the table is terminated
 * by the all-zero sentinel entry required by MODULE_DEVICE_TABLE.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}      /* sentinel */
};
212
213 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
214
/* Names reported for ETHTOOL_GSTATS, in the exact order the u64 stat
 * values are emitted; the array is sized to TG3_NUM_STATS so the name
 * count must stay in lock-step with struct tg3_ethtool_stats.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
295
/* Names for the TG3_NUM_TEST ethtool self-tests, in result order.
 * The "(online)"/"(offline)" suffix tells the user whether the test
 * can run without taking the link down.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
306
/* Direct MMIO register write (posted; no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
311
312 static u32 tg3_read32(struct tg3 *tp, u32 off)
313 {
314         return (readl(tp->regs + off));
315 }
316
/* Register write via the PCI config-space indirect window.  The
 * REG_BASE_ADDR/REG_DATA pair is shared chip state, so both config
 * writes are serialized under indirect_lock with IRQs disabled.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
326
/* MMIO write followed by a read-back of the same register, forcing the
 * posted write out to the chip before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
332
/* Register read via the PCI config-space indirect window; the shared
 * base/data pair is protected by indirect_lock (see the write variant).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
344
/* Mailbox write when the chip is driven in indirect (config-space) mode.
 *
 * The RX return-ring consumer and standard-ring producer mailboxes have
 * dedicated config-space aliases and are written directly with a single
 * config write (no lock needed).  All other mailboxes go through the
 * shared REG_BASE_ADDR/REG_DATA window under indirect_lock; the +0x5600
 * offset maps the mailbox address into that register space.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
374
/* Mailbox read in indirect mode: same +0x5600 address translation and
 * indirect_lock protocol as tg3_write_indirect_mbox.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
386
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods: tp->write32 already guarantees the
                 * write reached the chip, so no read-back is needed.
                 */
                tp->write32(tp, off, val);
        else {
                /* Posted method: delay, then read back to flush the write. */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
411
/* Mailbox write with a read-back flush, skipped on chips where the
 * MBOX_WRITE_REORDER or ICH workarounds make the read-back unnecessary
 * or unsafe.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
419
/* Post a TX producer index to its mailbox.  Chips flagged with the TXD
 * mailbox hardware bug need the value written twice; chips that may
 * reorder mailbox writes need a read-back to enforce ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
429
430 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
431 {
432         return (readl(tp->regs + off + GRCMBOX_BASE));
433 }
434
/* 5906 mailbox write: accesses go through the GRCMBOX_BASE alias region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
439
440 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
441 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
442 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
443 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
444 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
445
446 #define tw32(reg,val)           tp->write32(tp, reg, val)
447 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
448 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
449 #define tr32(reg)               tp->read32(tp, reg)
450
/* Write @val into NIC SRAM at @off through the memory window.
 * On the 5906, writes into the statistics-block region are silently
 * dropped.  SRAM_USE_CONFIG chips drive the window via PCI config
 * space; others use the flushed MMIO window registers.  The window
 * base is restored to zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
475
/* Read NIC SRAM at @off into *@val through the memory window; mirror of
 * tg3_write_mem.  Reads from the 5906 statistics-block region return 0
 * instead of touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
502
/* Mask chip interrupts: set the PCI INT mask bit in MISC_HOST_CTRL and
 * write 1 to the interrupt mailbox (which, in indirect mode, also
 * triggers the GRC clear-int workaround in tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
509
/* Force an interrupt if status may already be pending: non-tagged chips
 * with an updated status block get a GRC SETINT; otherwise the host
 * coalescing engine is kicked with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
519
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * other CPUs see it before the unmask), clear the PCI INT mask, ack the
 * last processed tag in the interrupt mailbox (written twice for 1-shot
 * MSI chips), then force an interrupt if work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
534
535 static inline unsigned int tg3_has_work(struct tg3 *tp)
536 {
537         struct tg3_hw_status *sblk = tp->hw_status;
538         unsigned int work_exists = 0;
539
540         /* check for phy events */
541         if (!(tp->tg3_flags &
542               (TG3_FLAG_USE_LINKCHG_REG |
543                TG3_FLAG_POLL_SERDES))) {
544                 if (sblk->status & SD_STATUS_LINK_CHG)
545                         work_exists = 1;
546         }
547         /* check for RX/TX work to do */
548         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
549             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
550                 work_exists = 1;
551
552         return work_exists;
553 }
554
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Ack the last processed tag; no read-back flush (see above) */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
575
/* Quiesce the data path: refresh trans_start so the TX watchdog does
 * not fire while the queue is intentionally stopped, then disable NAPI
 * polling and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
582
/* Restart the data path after tg3_netif_stop: wake the TX queue,
 * re-enable NAPI, mark the status block updated so the next poll runs,
 * and unmask chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
594
/* Switch the core clock between the ALT and normal sources.  No-op on
 * CPMU-equipped and 5780-class chips.  Every clock-control write uses a
 * 40 usec settle delay (see _tw32_flush's usec_wait).  The cached
 * pci_clock_ctrl keeps only the CLKRUN bits and the low 5 bits of the
 * current setting.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Older chips: step through ALTCLK in two writes before
                 * the final setting takes effect.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
626
627 #define PHY_BUSY_LOOPS  5000
628
/* Read PHY register @reg over the MI (MDIO) interface into *@val.
 * Returns 0 on success or -EBUSY if the interface stays busy for
 * PHY_BUSY_LOOPS polls.  Hardware autopolling, if enabled, is turned
 * off for the duration of the transaction and restored afterwards.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI command frame: PHY address, register, read op */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears; re-read once more after a
         * short delay to pick up the stable data value.
         */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore autopolling if we disabled it above */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
677
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Returns 0 on success or -EBUSY on timeout.  On 5906 chips, writes to
 * MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped (treated as
 * success).  Autopolling is suspended around the transaction, as in
 * tg3_readphy.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI command frame: PHY address, register, data, write op */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore autopolling if we disabled it above */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
726
/* Enable or disable automatic MDI/MDI-X crossover in the PHY.
 * Skipped on pre-5705 chips and on any SerDes interface.  The 5906
 * ePHY is programmed through its shadow-register scheme (shadow enable
 * toggled around the misc-control update); other PHYs use the AUX_CTRL
 * misc shadow register with the write-enable bit set.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        /* Restore the original (non-shadow) test register */
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
764
/* Enable the PHY "ethernet@wirespeed" feature unless the chip is
 * flagged NO_ETH_WIRE_SPEED.  The magic 0x7007 value presumably selects
 * the AUX_CTRL shadow holding the wirespeed control, and bits 15 and 4
 * enable it -- NOTE(review): confirm against Broadcom PHY documentation.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
777
778 static int tg3_bmcr_reset(struct tg3 *tp)
779 {
780         u32 phy_control;
781         int limit, err;
782
783         /* OK, reset it, and poll the BMCR_RESET bit until it
784          * clears or we time out.
785          */
786         phy_control = BMCR_RESET;
787         err = tg3_writephy(tp, MII_BMCR, phy_control);
788         if (err != 0)
789                 return -EBUSY;
790
791         limit = 5000;
792         while (limit--) {
793                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
794                 if (err != 0)
795                         return -EBUSY;
796
797                 if ((phy_control & BMCR_RESET) == 0) {
798                         udelay(40);
799                         break;
800                 }
801                 udelay(10);
802         }
803         if (limit <= 0)
804                 return -EBUSY;
805
806         return 0;
807 }
808
809 static int tg3_wait_macro_done(struct tg3 *tp)
810 {
811         int limit = 100;
812
813         while (limit--) {
814                 u32 tmp32;
815
816                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
817                         if ((tmp32 & 0x1000) == 0)
818                                 break;
819                 }
820         }
821         if (limit <= 0)
822                 return -EBUSY;
823
824         return 0;
825 }
826
/* Write a fixed test pattern into the PHY DSP on each of the four
 * channels, run the DSP macro, and read the pattern back to verify it.
 *
 * On any macro timeout *resetp is set so the caller retries after a
 * fresh PHY reset.  Returns 0 when every channel verified, -EBUSY on
 * timeout or pattern mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six 16-bit words of test data per channel (stored as u32). */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address the channel's DSP block (0x2000 stride) and
		 * put register 0x16 into write mode (0x0002).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Trigger the write macro and wait for it to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and start the readback macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back as low/high word pairs and
		 * compare against what was written (low word masked to
		 * 15 bits, high word to 4 bits).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* NOTE(review): these DSP pokes appear to
				 * put the PHY back into a sane state after
				 * a mismatch — TODO confirm against the
				 * Broadcom PHY programming notes.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
892
893 static int tg3_phy_reset_chanpat(struct tg3 *tp)
894 {
895         int chan;
896
897         for (chan = 0; chan < 4; chan++) {
898                 int i;
899
900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
901                              (chan * 0x2000) | 0x0200);
902                 tg3_writephy(tp, 0x16, 0x0002);
903                 for (i = 0; i < 6; i++)
904                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
905                 tg3_writephy(tp, 0x16, 0x0202);
906                 if (tg3_wait_macro_done(tp))
907                         return -EBUSY;
908         }
909
910         return 0;
911 }
912
/* PHY reset workaround for 5703/5704/5705: repeatedly reset the PHY and
 * run the DSP test-pattern check (up to 10 tries), then clear the
 * pattern and restore normal operation.
 *
 * Returns 0 on success or a negative errno from the reset/pattern steps.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* On failure this sets do_phy_reset so the next pass
		 * starts from a fresh BMCR reset.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if every iteration bailed via "continue" above,
	 * phy9_orig (and err) are used below without having been set —
	 * presumably MDIO reads only fail transiently here; verify.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the 1000BASE-T control register saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (undo the 0x3000 set). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
988
989 static void tg3_link_report(struct tg3 *);
990
/* Reset the tigon3 PHY and re-apply all chip-specific PHY workarounds
 * (ADC, BER, jitter bugs, jumbo-frame bits, automdix, wirespeed).
 * Returns 0 on success, negative errno on MDIO failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the 5906 internal ePHY out of IDDQ (power-down). */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is latched; read twice to get the current state. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* PHY ADC bug workaround: magic DSP writes. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* 5704 A0 workaround; the double write is intentional. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	/* Bit-error-rate bug workaround. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Jitter bug workaround, optionally with trim adjustment. */
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1097
/* Drive the GRC GPIO pins that control auxiliary (Vaux) power on NIC
 * (non-LOM) boards.  On dual-port 5704/5714 the two functions share the
 * aux supply, so the peer device's WOL/ASF state is consulted before
 * powering down.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOM designs don't use these GPIOs for power control. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* If either function needs WOL or ASF, keep aux power up. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer (if already initialized) own the
			 * GPIO sequencing.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Staged assertion of GPIO0, then release GPIO2. */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither function needs aux power: switch it off
		 * (not applicable on 5700/5701).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1193
1194 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1195 {
1196         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1197                 return 1;
1198         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1199                 if (speed != SPEED_10)
1200                         return 1;
1201         } else if (speed == SPEED_10)
1202                 return 1;
1203
1204         return 0;
1205 }
1206
1207 static int tg3_setup_phy(struct tg3 *, int);
1208
1209 #define RESET_KIND_SHUTDOWN     0
1210 #define RESET_KIND_INIT         1
1211 #define RESET_KIND_SUSPEND      2
1212
1213 static void tg3_write_sig_post_reset(struct tg3 *, int);
1214 static int tg3_halt_cpu(struct tg3 *, u32);
1215 static int tg3_nvram_lock(struct tg3 *);
1216 static void tg3_nvram_unlock(struct tg3 *);
1217
/* Power down the PHY (or serdes) as part of entering a low-power state,
 * honoring per-chip restrictions on which devices may actually be
 * powered down.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Quiesce the 5704 serdes via HW autoneg + soft
			 * reset and bit 15 of the serdes config.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906 ePHY: reset, then enter IDDQ power-down. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force LEDs off and write the aux-control power value. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1257
/* Transition the device into the requested PCI power state, configuring
 * WOL, clocks, PHY power and aux power along the way.
 *
 * Returns 0 on success, -EINVAL for an unsupported state.  PCI_D0
 * returns early after restoring full power; D1/D2/D3hot fall through to
 * the full shutdown sequence.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear any pending PME and the current state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask interrupts while the device is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save current link settings so they can be restored on resume. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper: drop to 10/half autoneg for minimum power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200 ms) for firmware to signal readiness. */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection if WOL from D3cold is
		 * supported and requested.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Chip-specific clock gating for the low-power state. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock changes in two steps, as the hardware
		 * requires settling time between them.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when neither WOL nor ASF needs it. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1481
1482 static void tg3_link_report(struct tg3 *tp)
1483 {
1484         if (!netif_carrier_ok(tp->dev)) {
1485                 if (netif_msg_link(tp))
1486                         printk(KERN_INFO PFX "%s: Link is down.\n",
1487                                tp->dev->name);
1488         } else if (netif_msg_link(tp)) {
1489                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1490                        tp->dev->name,
1491                        (tp->link_config.active_speed == SPEED_1000 ?
1492                         1000 :
1493                         (tp->link_config.active_speed == SPEED_100 ?
1494                          100 : 10)),
1495                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1496                         "full" : "half"));
1497
1498                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1499                        "%s for RX.\n",
1500                        tp->dev->name,
1501                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1502                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1503         }
1504 }
1505
/* Resolve TX/RX flow control (pause) from the local and remote autoneg
 * advertisements, then program the MAC RX/TX mode registers to match.
 *
 * @tp:         device state
 * @local_adv:  our advertisement word (MII_ADVERTISE bit layout)
 * @remote_adv: link partner ability word (MII_LPA bit layout)
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution: symmetric pause on both sides enables
		 * both directions; asymmetric combinations enable only one
		 * direction (per IEEE 802.3 Annex 28B resolution rules).
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Flow control is forced by configuration; keep the
		 * currently set pause flags as-is.
		 */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch (and flush) the MAC register when it changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1577
1578 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1579 {
1580         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1581         case MII_TG3_AUX_STAT_10HALF:
1582                 *speed = SPEED_10;
1583                 *duplex = DUPLEX_HALF;
1584                 break;
1585
1586         case MII_TG3_AUX_STAT_10FULL:
1587                 *speed = SPEED_10;
1588                 *duplex = DUPLEX_FULL;
1589                 break;
1590
1591         case MII_TG3_AUX_STAT_100HALF:
1592                 *speed = SPEED_100;
1593                 *duplex = DUPLEX_HALF;
1594                 break;
1595
1596         case MII_TG3_AUX_STAT_100FULL:
1597                 *speed = SPEED_100;
1598                 *duplex = DUPLEX_FULL;
1599                 break;
1600
1601         case MII_TG3_AUX_STAT_1000HALF:
1602                 *speed = SPEED_1000;
1603                 *duplex = DUPLEX_HALF;
1604                 break;
1605
1606         case MII_TG3_AUX_STAT_1000FULL:
1607                 *speed = SPEED_1000;
1608                 *duplex = DUPLEX_FULL;
1609                 break;
1610
1611         default:
1612                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1613                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1614                                  SPEED_10;
1615                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1616                                   DUPLEX_HALF;
1617                         break;
1618                 }
1619                 *speed = SPEED_INVALID;
1620                 *duplex = DUPLEX_INVALID;
1621                 break;
1622         };
1623 }
1624
/* Program the copper PHY's advertisement/control registers from
 * tp->link_config, then either force the requested speed/duplex or
 * (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* ...but keep 100Mb advertised if Wake-on-LAN must be
		 * able to link at 100Mb.
		 */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise everything permitted by
		 * link_config.advertising.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		/* Gigabit advertisement lives in the 1000BASE-T control
		 * register.
		 */
		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* Early 5701 revisions are configured as master
			 * (NOTE(review): looks like a chip-rev workaround
			 * — confirm against Broadcom errata).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* Forced 10/100: clear gigabit control, advertise
			 * only the requested mode.
			 */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: program BMCR directly. */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link by entering loopback, wait up to
			 * ~15ms for link-down to latch, then program the
			 * new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg requested: enable and restart it. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1757
1758 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1759 {
1760         int err;
1761
1762         /* Turn off tap power management. */
1763         /* Set Extended packet length bit */
1764         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1765
1766         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1767         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1768
1769         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1770         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1771
1772         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1773         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1774
1775         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1776         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1777
1778         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1779         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1780
1781         udelay(40);
1782
1783         return err;
1784 }
1785
1786 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1787 {
1788         u32 adv_reg, all_mask = 0;
1789
1790         if (mask & ADVERTISED_10baseT_Half)
1791                 all_mask |= ADVERTISE_10HALF;
1792         if (mask & ADVERTISED_10baseT_Full)
1793                 all_mask |= ADVERTISE_10FULL;
1794         if (mask & ADVERTISED_100baseT_Half)
1795                 all_mask |= ADVERTISE_100HALF;
1796         if (mask & ADVERTISED_100baseT_Full)
1797                 all_mask |= ADVERTISE_100FULL;
1798
1799         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1800                 return 0;
1801
1802         if ((adv_reg & all_mask) != all_mask)
1803                 return 0;
1804         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1805                 u32 tg3_ctrl;
1806
1807                 all_mask = 0;
1808                 if (mask & ADVERTISED_1000baseT_Half)
1809                         all_mask |= ADVERTISE_1000HALF;
1810                 if (mask & ADVERTISED_1000baseT_Full)
1811                         all_mask |= ADVERTISE_1000FULL;
1812
1813                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1814                         return 0;
1815
1816                 if ((tg3_ctrl & all_mask) != all_mask)
1817                         return 0;
1818         }
1819         return 1;
1820 }
1821
/* (Re)establish the link on a copper PHY: reset the PHY when needed,
 * apply per-chip workarounds, determine link/speed/duplex, resolve
 * flow control, program the MAC to match, and report carrier changes.
 * Returns 0, or a PHY access error from the 5401 DSP setup path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any stale link-state attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice throughout this function: the link
		 * status bit latches transitions, so the second read
		 * reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP fixups and
			 * wait up to ~10ms for it to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0: a gigabit link that fails to come
			 * back gets a full PHY reset plus a fresh DSP load
			 * (NOTE(review): presumably a PHY erratum — confirm).
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* With MI interrupts, unmask only link-change; otherwise mask
	 * everything (5906 is left with its existing mask).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure aux-ctrl bit 10 is set; if we had to set it
		 * just now, skip straight to relink so the PHY is
		 * reconfigured from scratch.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 double-reads) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to 20ms) for a non-zero aux status word, then
		 * decode speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Re-read BMCR until it returns a sane value; all-zero
		 * and 0x7fff reads are discarded as garbage.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: the link only counts as up when
			 * the PHY matches the configured mode exactly
			 * with autoneg disabled.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* Full-duplex autoneg link: resolve pause settings. */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements and restart the link, then
		 * check whether it came straight back up.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Notify on-chip firmware of a gigabit link on PCIX 5700 via
	 * the firmware mailbox (NOTE(review): exact firmware contract
	 * not visible here — confirm against firmware docs).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier changes to the stack and log them. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2098
/* Software state for the 1000BASE-X autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine() when fiber autoneg is performed
 * in software rather than by the MAC.
 */
struct tg3_fiber_aneginfo {
	/* Current state machine position (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/status bits (MR_*): enable/restart requests plus the
	 * link partner abilities decoded from the received config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances each state machine call;
	 * link_time records when the current phase began.
	 */
	unsigned long link_time, cur_time;

	/* Last config word seen and how many consecutive times the
	 * same word has been received (ability match detection).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean match detectors derived from the received stream. */
	char ability_match, idle_match, ack_match;

	/* Raw config words sent/received on the wire (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0       /* no error; keep running the machine */
#define ANEG_DONE       1       /* negotiation finished */
#define ANEG_TIMER_ENAB 2       /* caller should keep the settle timer armed */
#define ANEG_FAILED     (-1)    /* invalid config received; parenthesized so
                                 * the macro expands safely in any expression
                                 */

/* Ticks (of ap->cur_time) allowed for the link to settle. */
#define ANEG_STATE_SETTLE_TIME  10000
2162
2163 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2164                                    struct tg3_fiber_aneginfo *ap)
2165 {
2166         unsigned long delta;
2167         u32 rx_cfg_reg;
2168         int ret;
2169
2170         if (ap->state == ANEG_STATE_UNKNOWN) {
2171                 ap->rxconfig = 0;
2172                 ap->link_time = 0;
2173                 ap->cur_time = 0;
2174                 ap->ability_match_cfg = 0;
2175                 ap->ability_match_count = 0;
2176                 ap->ability_match = 0;
2177                 ap->idle_match = 0;
2178                 ap->ack_match = 0;
2179         }
2180         ap->cur_time++;
2181
2182         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2183                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2184
2185                 if (rx_cfg_reg != ap->ability_match_cfg) {
2186                         ap->ability_match_cfg = rx_cfg_reg;
2187                         ap->ability_match = 0;
2188                         ap->ability_match_count = 0;
2189                 } else {
2190                         if (++ap->ability_match_count > 1) {
2191                                 ap->ability_match = 1;
2192                                 ap->ability_match_cfg = rx_cfg_reg;
2193                         }
2194                 }
2195                 if (rx_cfg_reg & ANEG_CFG_ACK)
2196                         ap->ack_match = 1;
2197                 else
2198                         ap->ack_match = 0;
2199
2200                 ap->idle_match = 0;
2201         } else {
2202                 ap->idle_match = 1;
2203                 ap->ability_match_cfg = 0;
2204                 ap->ability_match_count = 0;
2205                 ap->ability_match = 0;
2206                 ap->ack_match = 0;
2207
2208                 rx_cfg_reg = 0;
2209         }
2210
2211         ap->rxconfig = rx_cfg_reg;
2212         ret = ANEG_OK;
2213
2214         switch(ap->state) {
2215         case ANEG_STATE_UNKNOWN:
2216                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2217                         ap->state = ANEG_STATE_AN_ENABLE;
2218
2219                 /* fallthru */
2220         case ANEG_STATE_AN_ENABLE:
2221                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2222                 if (ap->flags & MR_AN_ENABLE) {
2223                         ap->link_time = 0;
2224                         ap->cur_time = 0;
2225                         ap->ability_match_cfg = 0;
2226                         ap->ability_match_count = 0;
2227                         ap->ability_match = 0;
2228                         ap->idle_match = 0;
2229                         ap->ack_match = 0;
2230
2231                         ap->state = ANEG_STATE_RESTART_INIT;
2232                 } else {
2233                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2234                 }
2235                 break;
2236
2237         case ANEG_STATE_RESTART_INIT:
2238                 ap->link_time = ap->cur_time;
2239                 ap->flags &= ~(MR_NP_LOADED);
2240                 ap->txconfig = 0;
2241                 tw32(MAC_TX_AUTO_NEG, 0);
2242                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2243                 tw32_f(MAC_MODE, tp->mac_mode);
2244                 udelay(40);
2245
2246                 ret = ANEG_TIMER_ENAB;
2247                 ap->state = ANEG_STATE_RESTART;
2248
2249                 /* fallthru */
2250         case ANEG_STATE_RESTART:
2251                 delta = ap->cur_time - ap->link_time;
2252                 if (delta > ANEG_STATE_SETTLE_TIME) {
2253                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2254                 } else {
2255                         ret = ANEG_TIMER_ENAB;
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_DISABLE_LINK_OK:
2260                 ret = ANEG_DONE;
2261                 break;
2262
2263         case ANEG_STATE_ABILITY_DETECT_INIT:
2264                 ap->flags &= ~(MR_TOGGLE_TX);
2265                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2266                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2267                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2268                 tw32_f(MAC_MODE, tp->mac_mode);
2269                 udelay(40);
2270
2271                 ap->state = ANEG_STATE_ABILITY_DETECT;
2272                 break;
2273
2274         case ANEG_STATE_ABILITY_DETECT:
2275                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2276                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2277                 }
2278                 break;
2279
2280         case ANEG_STATE_ACK_DETECT_INIT:
2281                 ap->txconfig |= ANEG_CFG_ACK;
2282                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2283                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2284                 tw32_f(MAC_MODE, tp->mac_mode);
2285                 udelay(40);
2286
2287                 ap->state = ANEG_STATE_ACK_DETECT;
2288
2289                 /* fallthru */
2290         case ANEG_STATE_ACK_DETECT:
2291                 if (ap->ack_match != 0) {
2292                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2293                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2294                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2295                         } else {
2296                                 ap->state = ANEG_STATE_AN_ENABLE;
2297                         }
2298                 } else if (ap->ability_match != 0 &&
2299                            ap->rxconfig == 0) {
2300                         ap->state = ANEG_STATE_AN_ENABLE;
2301                 }
2302                 break;
2303
2304         case ANEG_STATE_COMPLETE_ACK_INIT:
2305                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2306                         ret = ANEG_FAILED;
2307                         break;
2308                 }
2309                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2310                                MR_LP_ADV_HALF_DUPLEX |
2311                                MR_LP_ADV_SYM_PAUSE |
2312                                MR_LP_ADV_ASYM_PAUSE |
2313                                MR_LP_ADV_REMOTE_FAULT1 |
2314                                MR_LP_ADV_REMOTE_FAULT2 |
2315                                MR_LP_ADV_NEXT_PAGE |
2316                                MR_TOGGLE_RX |
2317                                MR_NP_RX);
2318                 if (ap->rxconfig & ANEG_CFG_FD)
2319                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2320                 if (ap->rxconfig & ANEG_CFG_HD)
2321                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2322                 if (ap->rxconfig & ANEG_CFG_PS1)
2323                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2324                 if (ap->rxconfig & ANEG_CFG_PS2)
2325                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2326                 if (ap->rxconfig & ANEG_CFG_RF1)
2327                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2328                 if (ap->rxconfig & ANEG_CFG_RF2)
2329                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2330                 if (ap->rxconfig & ANEG_CFG_NP)
2331                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2332
2333                 ap->link_time = ap->cur_time;
2334
2335                 ap->flags ^= (MR_TOGGLE_TX);
2336                 if (ap->rxconfig & 0x0008)
2337                         ap->flags |= MR_TOGGLE_RX;
2338                 if (ap->rxconfig & ANEG_CFG_NP)
2339                         ap->flags |= MR_NP_RX;
2340                 ap->flags |= MR_PAGE_RX;
2341
2342                 ap->state = ANEG_STATE_COMPLETE_ACK;
2343                 ret = ANEG_TIMER_ENAB;
2344                 break;
2345
2346         case ANEG_STATE_COMPLETE_ACK:
2347                 if (ap->ability_match != 0 &&
2348                     ap->rxconfig == 0) {
2349                         ap->state = ANEG_STATE_AN_ENABLE;
2350                         break;
2351                 }
2352                 delta = ap->cur_time - ap->link_time;
2353                 if (delta > ANEG_STATE_SETTLE_TIME) {
2354                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2355                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2356                         } else {
2357                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2358                                     !(ap->flags & MR_NP_RX)) {
2359                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2360                                 } else {
2361                                         ret = ANEG_FAILED;
2362                                 }
2363                         }
2364                 }
2365                 break;
2366
2367         case ANEG_STATE_IDLE_DETECT_INIT:
2368                 ap->link_time = ap->cur_time;
2369                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2370                 tw32_f(MAC_MODE, tp->mac_mode);
2371                 udelay(40);
2372
2373                 ap->state = ANEG_STATE_IDLE_DETECT;
2374                 ret = ANEG_TIMER_ENAB;
2375                 break;
2376
2377         case ANEG_STATE_IDLE_DETECT:
2378                 if (ap->ability_match != 0 &&
2379                     ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                         break;
2382                 }
2383                 delta = ap->cur_time - ap->link_time;
2384                 if (delta > ANEG_STATE_SETTLE_TIME) {
2385                         /* XXX another gem from the Broadcom driver :( */
2386                         ap->state = ANEG_STATE_LINK_OK;
2387                 }
2388                 break;
2389
2390         case ANEG_STATE_LINK_OK:
2391                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2392                 ret = ANEG_DONE;
2393                 break;
2394
2395         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2396                 /* ??? unimplemented */
2397                 break;
2398
2399         case ANEG_STATE_NEXT_PAGE_WAIT:
2400                 /* ??? unimplemented */
2401                 break;
2402
2403         default:
2404                 ret = ANEG_FAILED;
2405                 break;
2406         };
2407
2408         return ret;
2409 }
2410
2411 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2412 {
2413         int res = 0;
2414         struct tg3_fiber_aneginfo aninfo;
2415         int status = ANEG_FAILED;
2416         unsigned int tick;
2417         u32 tmp;
2418
2419         tw32_f(MAC_TX_AUTO_NEG, 0);
2420
2421         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2422         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2423         udelay(40);
2424
2425         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2426         udelay(40);
2427
2428         memset(&aninfo, 0, sizeof(aninfo));
2429         aninfo.flags |= MR_AN_ENABLE;
2430         aninfo.state = ANEG_STATE_UNKNOWN;
2431         aninfo.cur_time = 0;
2432         tick = 0;
2433         while (++tick < 195000) {
2434                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2435                 if (status == ANEG_DONE || status == ANEG_FAILED)
2436                         break;
2437
2438                 udelay(1);
2439         }
2440
2441         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2442         tw32_f(MAC_MODE, tp->mac_mode);
2443         udelay(40);
2444
2445         *flags = aninfo.flags;
2446
2447         if (status == ANEG_DONE &&
2448             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2449                              MR_LP_ADV_FULL_DUPLEX)))
2450                 res = 1;
2451
2452         return res;
2453 }
2454
/* Initialize the BCM8002 SerDes PHY: software reset, then a fixed
 * vendor-prescribed register sequence.  The bare hex register numbers
 * and values are Broadcom-specified with no public symbolic names;
 * the ordering and delays must not be changed.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	/* Busy-wait ~5ms; this can run in atomic context so we cannot
	 * sleep here.
	 */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	/* ~150ms busy-wait, same atomic-context constraint as above. */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2504
/* Configure fiber link using the SG_DIG hardware autonegotiation block.
 * Handles three cases: forced (autoneg disabled), (re)starting hardware
 * autoneg when SG_DIG_CTRL is not yet programmed as expected, and
 * processing autoneg completion / falling back to parallel detection.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg is currently enabled
		 * (bit 31), disable it and reprogram the SerDes.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If a parallel-detected link is still alive (PCS sync
		 * without received config words), keep it up while the
		 * detection timer runs down instead of restarting AN.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the AN-restart bit (30), then arm the expected
		 * control value.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 is autoneg-complete; bits 19/20
		 * carry the partner's pause advertisement.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: let the AN timeout run
			 * down before trying parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the AN timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2641
/* Configure fiber link without the SG_DIG hardware block, using the
 * software autoneg state machine (fiber_autoneg) or a forced 1000FD
 * setup when autoneg is disabled.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no signal to negotiate with. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the MR_* result flags into MII-style
			 * pause advertisement bits for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop arriving
		 * (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and no incoming config
		 * words: treat as link up (parallel-detect style).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config words, then return to normal mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2698
/* Top-level link setup for TBI/fiber boards.  Picks hardware (SG_DIG)
 * or software autonegotiation, updates carrier state and LEDs, and
 * reports link changes.
 *
 * @tp:          device state
 * @force_reset: unused here; present so this routine shares a signature
 *               with the other tg3_setup_*_phy dispatch targets
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember prior link parameters so we can report a change even
	 * when the carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, and the MAC
	 * reports a clean synced/signal state with no pending config
	 * change - nothing to do beyond acking change bits.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Fiber always runs in TBI full-duplex port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change bits until the MAC settles (bounded at 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: nudge the partner by
		 * pulsing config words once.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber link is always 1000 full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged: still report if pause config,
		 * speed, or duplex moved.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2810
/* Link setup for fiber boards whose SerDes is driven through an
 * MII-style register interface (e.g. 5714S/5780 class).  Programs
 * advertisement/BMCR as needed, then derives link state, speed, and
 * duplex from BMSR/BMCR and the resolved advertisement.
 *
 * @tp:          device state
 * @force_reset: nonzero to hard-reset the PHY before configuring
 *
 * Returns an accumulated error indicator from the tg3_readphy calls
 * (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for the current
	 * value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status is the authoritative link
		 * indicator; override what the PHY reported.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured link modes, preserving unrelated bits.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): restart
			 * autoneg and let the serdes timer finish the job.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, apply requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * AN so the partner drops the link before
				 * we switch to forced mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample latched-low link status (twice). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * intersection of both advertisements; no common
			 * 1000X mode means the link is not usable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests link_config.active_duplex BEFORE
	 * current_duplex is copied into it below, so it acts on the
	 * previous link's duplex - confirm this ordering is intended.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2977
/* Periodic poll (after the autoneg grace period) that handles parallel
 * detection on MII-SerDes boards: force the link up when we see signal
 * but no config words, and re-enable autoneg when config words appear
 * on a parallel-detected link.  Register 0x1c/0x17/0x15 accesses are
 * vendor shadow/expansion registers.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice to clear the latched value. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3035
3036 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3037 {
3038         int err;
3039
3040         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3041                 err = tg3_setup_fiber_phy(tp, force_reset);
3042         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3043                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3044         } else {
3045                 err = tg3_setup_copper_phy(tp, force_reset);
3046         }
3047
3048         if (tp->link_config.active_speed == SPEED_1000 &&
3049             tp->link_config.active_duplex == DUPLEX_HALF)
3050                 tw32(MAC_TX_LENGTHS,
3051                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3052                       (6 << TX_LENGTHS_IPG_SHIFT) |
3053                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3054         else
3055                 tw32(MAC_TX_LENGTHS,
3056                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3057                       (6 << TX_LENGTHS_IPG_SHIFT) |
3058                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3059
3060         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3061                 if (netif_carrier_ok(tp->dev)) {
3062                         tw32(HOSTCC_STAT_COAL_TICKS,
3063                              tp->coal.stats_block_coalesce_usecs);
3064                 } else {
3065                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3066                 }
3067         }
3068
3069         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3070                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3071                 if (!netif_carrier_ok(tp->dev))
3072                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3073                               tp->pwrmgmt_thresh;
3074                 else
3075                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3076                 tw32(PCIE_PWR_MGMT_THRESH, val);
3077         }
3078
3079         return err;
3080 }
3081
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set or the
	 * indirect mailbox writer in use) this path should be
	 * unreachable; treat it as a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* The actual chip reset happens later in tg3_reset_task(),
	 * which tests this flag under tp->lock.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3102
3103 static inline u32 tg3_tx_avail(struct tg3 *tp)
3104 {
3105         smp_mb();
3106         return (tp->tx_pending -
3107                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3108 }
3109
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a "completed" slot means the completion
		 * index is bogus, i.e. mailbox writes were re-ordered.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap the fragment descriptors that followed the head. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Sanity: frag slots must be skb-less and must not
			 * run past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock to close the race with a
		 * concurrent tg3_start_xmit() stopping the queue.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3177
3178 /* Returns size of skb allocated or < 0 on error.
3179  *
3180  * We only need to fill in the address because the other members
3181  * of the RX descriptor are invariant, see tg3_init_rings.
3182  *
3183  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3184  * posting buffers we only dirty the first cache line of the RX
3185  * descriptor (containing the address).  Whereas for the RX status
3186  * buffers the cpu only reads the last cacheline of the RX descriptor
3187  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3188  */
3189 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3190                             int src_idx, u32 dest_idx_unmasked)
3191 {
3192         struct tg3_rx_buffer_desc *desc;
3193         struct ring_info *map, *src_map;
3194         struct sk_buff *skb;
3195         dma_addr_t mapping;
3196         int skb_size, dest_idx;
3197
3198         src_map = NULL;
3199         switch (opaque_key) {
3200         case RXD_OPAQUE_RING_STD:
3201                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3202                 desc = &tp->rx_std[dest_idx];
3203                 map = &tp->rx_std_buffers[dest_idx];
3204                 if (src_idx >= 0)
3205                         src_map = &tp->rx_std_buffers[src_idx];
3206                 skb_size = tp->rx_pkt_buf_sz;
3207                 break;
3208
3209         case RXD_OPAQUE_RING_JUMBO:
3210                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3211                 desc = &tp->rx_jumbo[dest_idx];
3212                 map = &tp->rx_jumbo_buffers[dest_idx];
3213                 if (src_idx >= 0)
3214                         src_map = &tp->rx_jumbo_buffers[src_idx];
3215                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3216                 break;
3217
3218         default:
3219                 return -EINVAL;
3220         };
3221
3222         /* Do not overwrite any of the map or rp information
3223          * until we are sure we can commit to a new buffer.
3224          *
3225          * Callers depend upon this behavior and assume that
3226          * we leave everything unchanged if we fail.
3227          */
3228         skb = netdev_alloc_skb(tp->dev, skb_size);
3229         if (skb == NULL)
3230                 return -ENOMEM;
3231
3232         skb_reserve(skb, tp->rx_offset);
3233
3234         mapping = pci_map_single(tp->pdev, skb->data,
3235                                  skb_size - tp->rx_offset,
3236                                  PCI_DMA_FROMDEVICE);
3237
3238         map->skb = skb;
3239         pci_unmap_addr_set(map, mapping, mapping);
3240
3241         if (src_map != NULL)
3242                 src_map->skb = NULL;
3243
3244         desc->addr_hi = ((u64)mapping >> 32);
3245         desc->addr_lo = ((u64)mapping & 0xffffffff);
3246
3247         return skb_size;
3248 }
3249
3250 /* We only need to move over in the address because the other
3251  * members of the RX descriptor are invariant.  See notes above
3252  * tg3_alloc_rx_skb for full details.
3253  */
3254 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3255                            int src_idx, u32 dest_idx_unmasked)
3256 {
3257         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3258         struct ring_info *src_map, *dest_map;
3259         int dest_idx;
3260
3261         switch (opaque_key) {
3262         case RXD_OPAQUE_RING_STD:
3263                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3264                 dest_desc = &tp->rx_std[dest_idx];
3265                 dest_map = &tp->rx_std_buffers[dest_idx];
3266                 src_desc = &tp->rx_std[src_idx];
3267                 src_map = &tp->rx_std_buffers[src_idx];
3268                 break;
3269
3270         case RXD_OPAQUE_RING_JUMBO:
3271                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3272                 dest_desc = &tp->rx_jumbo[dest_idx];
3273                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3274                 src_desc = &tp->rx_jumbo[src_idx];
3275                 src_map = &tp->rx_jumbo_buffers[src_idx];
3276                 break;
3277
3278         default:
3279                 return;
3280         };
3281
3282         dest_map->skb = src_map->skb;
3283         pci_unmap_addr_set(dest_map, mapping,
3284                            pci_unmap_addr(src_map, mapping));
3285         dest_desc->addr_hi = src_desc->addr_hi;
3286         dest_desc->addr_lo = src_desc->addr_lo;
3287
3288         src_map->skb = NULL;
3289 }
3290
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged receive up the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3297
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * Processes up to @budget packets from the status ring; returns the
 * number of packets handed to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring the
		 * buffer came from and its slot in that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring key: skip without posting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the DMA buffer straight up
			 * and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it out and recycle the
			 * original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flagged
		 * the packet and the computed csum is all-ones.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about std-ring refills so
		 * it does not run dry on long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3477
/* NAPI poll callback: service link change events, TX completions,
 * and up to @budget RX packets; returns the RX work done.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before servicing it
			 * so a new event is not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() may have detected mailbox write re-ordering;
		 * stop polling and let the reset task rebuild the chip.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	/* Record the tag of the status block just processed so the
	 * tagged-status ISR can detect stale interrupts.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3529
/* Flag the ISR as synchronized and wait for any in-flight handler
 * to finish.  Called from tg3_full_lock() when irq_sync is requested.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on the handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3539
/* Nonzero while the IRQ handler is quiesced; ISRs check this before
 * scheduling NAPI work.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3544
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3556
/* Counterpart to tg3_full_lock(); releases tp->lock. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3561
3562 /* One-shot MSI handler - Chip automatically disables interrupt
3563  * after sending MSI so driver doesn't have to do it.
3564  */
3565 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3566 {
3567         struct net_device *dev = dev_id;
3568         struct tg3 *tp = netdev_priv(dev);
3569
3570         prefetch(tp->hw_status);
3571         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3572
3573         if (likely(!tg3_irq_sync(tp)))
3574                 netif_rx_schedule(dev, &tp->napi);
3575
3576         return IRQ_HANDLED;
3577 }
3578
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RX status entry. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3603
/* Legacy INTx interrupt handler; see tg3_interrupt_tagged below for
 * the tagged-status variant.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3652
/* INTx handler for chips using tagged status blocks: an unchanged
 * status tag means the interrupt is stale or not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3700
3701 /* ISR for interrupt test */
3702 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3703 {
3704         struct net_device *dev = dev_id;
3705         struct tg3 *tp = netdev_priv(dev);
3706         struct tg3_hw_status *sblk = tp->hw_status;
3707
3708         if ((sblk->status & SD_STATUS_UPDATED) ||
3709             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3710                 tg3_disable_ints(tp);
3711                 return IRQ_RETVAL(1);
3712         }
3713         return IRQ_RETVAL(0);
3714 }
3715
3716 static int tg3_init_hw(struct tg3 *, int);
3717 static int tg3_halt(struct tg3 *, int, int);
3718
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the device.
		 * The full lock is dropped across dev_close() and
		 * re-acquired before returning to the caller.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3740
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Net-poll hook (e.g. netconsole): invoke the ISR directly when
 * normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3749
/* Workqueue handler: halt and re-initialize the chip after an error
 * (tx timeout, mailbox reorder recovery, deferred timer restart).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the device was closed before this ran. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock, this time also quiescing the IRQ handler. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* If tg3_tx() flagged mailbox write re-ordering, switch to the
	 * flushing mailbox writers before re-initializing the chip.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3790
/* Log a brief MAC/DMA status snapshot; used from the tx timeout path. */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3798
/* net_device watchdog hook: log the timeout (if tx_err messages are
 * enabled) and defer a chip reset to the workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3811
3812 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3813 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3814 {
3815         u32 base = (u32) mapping & 0xffffffff;
3816
3817         return ((base > 0xffffdcc0) &&
3818                 (base + len + 8 < base));
3819 }
3820
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only meaningful on chips with the 40-bit DMA erratum, and
	 * only when addresses can actually exceed 40 bits.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3833
3834 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3835
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Called from the xmit path after descriptors for @skb were already
 * written but found to hit a DMA erratum.  Replaces the offending
 * multi-descriptor packet with a single linear copy:
 *
 *   @last_plus_one: ring index one past the packet's last descriptor.
 *   @start: in: ring index of the packet's first descriptor;
 *           out: index one past the replacement descriptor on success.
 *
 * Returns 0 on success, -1 if the copy or its mapping also fails (in
 * which case the packet is dropped).  The original skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* One descriptor covers the whole linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  Walk every descriptor the
	 * original packet occupied, unmapping its head (i == 0) and
	 * fragment buffers, and park the replacement skb in the first
	 * slot so tx reclaim finds it.
	 * NOTE(review): fragments were mapped with pci_map_page but are
	 * unmapped here with pci_unmap_single — appears to work on the
	 * platforms this targets, but worth confirming against the DMA
	 * API rules.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3893
3894 static void tg3_set_txd(struct tg3 *tp, int entry,
3895                         dma_addr_t mapping, int len, u32 flags,
3896                         u32 mss_and_is_end)
3897 {
3898         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3899         int is_end = (mss_and_is_end & 0x1);
3900         u32 mss = (mss_and_is_end >> 1);
3901         u32 vlan_tag = 0;
3902
3903         if (is_end)
3904                 flags |= TXD_FLAG_END;
3905         if (flags & TXD_FLAG_VLAN) {
3906                 vlan_tag = flags >> 16;
3907                 flags &= 0xffff;
3908         }
3909         vlan_tag |= (mss << TXD_MSS_SHIFT);
3910
3911         txd->addr_hi = ((u64) mapping >> 32);
3912         txd->addr_lo = ((u64) mapping & 0xffffffff);
3913         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3914         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3915 }
3916
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and fragments, writes one tx descriptor per
 * piece, then kicks the chip's producer-index mailbox.  Returns
 * NETDEV_TX_OK (also on TSO-unclone failure, where the skb is
 * dropped) or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO: the headers are rewritten below, so the skb
		 * head must be private to us.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Zero the IP checksum and seed tot_len with
			 * header length + MSS; header length is also
			 * packed into the upper bits of mss for the
			 * descriptor.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Zeroed for the chip's TSO engine to fill in. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Only the first descriptor of a packet stores the skb
	 * pointer; tx reclaim keys off it.
	 */
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping: reclaim may have freed
		 * space between the test and the stop.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4035
4036 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4037
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments @skb into MTU-sized packets and transmits each
 * one through tg3_start_xmit_dma_bug().  The original skb is always
 * consumed.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring
 * cannot hold the estimated descriptor count.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		/* Reclaim freed enough space in the meantime. */
		netif_wake_queue(tp->dev);
	}

	/* Mask off TSO so the stack performs the segmentation. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Detach each segment from the list and transmit it. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4070
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but additionally checks every DMA mapping
 * against the 4GB-crossing and >40-bit errata; if any descriptor
 * would trip a bug, the whole packet is replayed through
 * tigon3_dma_hwbug_workaround() as a single linear copy.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO rewrites the headers below, so the skb head
		 * must be private to us.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* TSO_BUG chips mishandle headers longer than 80
		 * bytes; fall back to software GSO segmentation.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO fills in the TCP checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the pseudo-header
			 * checksum pre-seeded.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option word counts where this chip
		 * family expects them: mss bits 11+ for HW TSO and
		 * 5705, base_flags bits 12+ otherwise.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Flag, but keep writing descriptors; the
			 * workaround below rewinds the whole packet.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the packet's first descriptor and replay
		 * it as one linear copy.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4244
4245 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4246                                int new_mtu)
4247 {
4248         dev->mtu = new_mtu;
4249
4250         if (new_mtu > ETH_DATA_LEN) {
4251                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4252                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4253                         ethtool_op_set_tso(dev, 0);
4254                 }
4255                 else
4256                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4257         } else {
4258                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4259                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4260                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4261         }
4262 }
4263
4264 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4265 {
4266         struct tg3 *tp = netdev_priv(dev);
4267         int err;
4268
4269         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4270                 return -EINVAL;
4271
4272         if (!netif_running(dev)) {
4273                 /* We'll just catch it later when the
4274                  * device is up'd.
4275                  */
4276                 tg3_set_mtu(dev, tp, new_mtu);
4277                 return 0;
4278         }
4279
4280         tg3_netif_stop(tp);
4281
4282         tg3_full_lock(tp, 1);
4283
4284         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4285
4286         tg3_set_mtu(dev, tp, new_mtu);
4287
4288         err = tg3_restart_hw(tp, 0);
4289
4290         if (!err)
4291                 tg3_netif_start(tp);
4292
4293         tg3_full_unlock(tp);
4294
4295         return err;
4296 }
4297
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard rx ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo rx ring, same treatment with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: only a packet's first descriptor stores the skb
	 * pointer; the following nr_frags descriptors hold the
	 * fragment mappings of that same skb, so advance i across
	 * all of them (index masked in case the packet wraps the
	 * ring).
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4369
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one rx buffer could be
 * allocated for a required ring.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips with a jumbo MTU use jumbo-sized buffers
	 * in the standard ring instead of a separate jumbo ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial
	 * allocation shrinks the ring rather than failing; only a
	 * completely empty standard ring is fatal.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4459
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() obtained; each pointer
 * is NULLed so a repeated call is harmless.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_std_buffers is the single kzalloc covering the std,
	 * jumbo and tx ring_info arrays (see tg3_alloc_consistent);
	 * one kfree releases all three.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4499
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring_info arrays plus all DMA-coherent
 * rings and status/statistics blocks.  Returns 0 on success or
 * -ENOMEM, in which case everything already obtained is released
 * via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One allocation backs three arrays: std rx, jumbo rx, and
	 * tx ring_info, carved up just below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* The chip DMAs into these; start from a clean slate. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4561
4562 #define MAX_WAIT_CNT 1000
4563
4564 /* To stop a block, clear the enable bit and poll till it
4565  * clears.  tp->lock is held.
4566  */
4567 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4568 {
4569         unsigned int i;
4570         u32 val;
4571
4572         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4573                 switch (ofs) {
4574                 case RCVLSC_MODE:
4575                 case DMAC_MODE:
4576                 case MBFREE_MODE:
4577                 case BUFMGR_MODE:
4578                 case MEMARB_MODE:
4579                         /* We can't enable/disable these bits of the
4580                          * 5705/5750, just say success.
4581                          */
4582                         return 0;
4583
4584                 default:
4585                         break;
4586                 };
4587         }
4588
4589         val = tr32(ofs);
4590         val &= ~enable_bit;
4591         tw32_f(ofs, val);
4592
4593         for (i = 0; i < MAX_WAIT_CNT; i++) {
4594                 udelay(100);
4595                 val = tr32(ofs);
4596                 if ((val & enable_bit) == 0)
4597                         break;
4598         }
4599
4600         if (i == MAX_WAIT_CNT && !silent) {
4601                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4602                        "ofs=%lx enable_bit=%x\n",
4603                        ofs, enable_bit);
4604                 return -ENODEV;
4605         }
4606
4607         return 0;
4608 }
4609
/* Stop all of the chip's DMA/list/coalescing blocks and the MAC
 * receive and transmit paths, then wipe the host-visible status and
 * statistics blocks.  Returns 0 on success or a negative value if
 * any block failed to stop.  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* Disable the MAC transmit data engine, then the TX MAC itself. */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us for the TX MAC to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and mbuf-free blocks. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset all on-chip flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	/* Buffer manager and memory arbiter go last. */
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the host-visible status block and statistics area. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4672
4673 /* tp->lock is held. */
4674 static int tg3_nvram_lock(struct tg3 *tp)
4675 {
4676         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4677                 int i;
4678
4679                 if (tp->nvram_lock_cnt == 0) {
4680                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4681                         for (i = 0; i < 8000; i++) {
4682                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4683                                         break;
4684                                 udelay(20);
4685                         }
4686                         if (i == 8000) {
4687                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4688                                 return -ENODEV;
4689                         }
4690                 }
4691                 tp->nvram_lock_cnt++;
4692         }
4693         return 0;
4694 }
4695
4696 /* tp->lock is held. */
4697 static void tg3_nvram_unlock(struct tg3 *tp)
4698 {
4699         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4700                 if (tp->nvram_lock_cnt > 0)
4701                         tp->nvram_lock_cnt--;
4702                 if (tp->nvram_lock_cnt == 0)
4703                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4704         }
4705 }
4706
4707 /* tp->lock is held. */
4708 static void tg3_enable_nvram_access(struct tg3 *tp)
4709 {
4710         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4711             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4712                 u32 nvaccess = tr32(NVRAM_ACCESS);
4713
4714                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4715         }
4716 }
4717
4718 /* tp->lock is held. */
4719 static void tg3_disable_nvram_access(struct tg3 *tp)
4720 {
4721         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4722             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4723                 u32 nvaccess = tr32(NVRAM_ACCESS);
4724
4725                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4726         }
4727 }
4728
4729 /* tp->lock is held. */
4730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4731 {
4732         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4733                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4734
4735         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4736                 switch (kind) {
4737                 case RESET_KIND_INIT:
4738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4739                                       DRV_STATE_START);
4740                         break;
4741
4742                 case RESET_KIND_SHUTDOWN:
4743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4744                                       DRV_STATE_UNLOAD);
4745                         break;
4746
4747                 case RESET_KIND_SUSPEND:
4748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4749                                       DRV_STATE_SUSPEND);
4750                         break;
4751
4752                 default:
4753                         break;
4754                 };
4755         }
4756 }
4757
4758 /* tp->lock is held. */
4759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4760 {
4761         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4762                 switch (kind) {
4763                 case RESET_KIND_INIT:
4764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4765                                       DRV_STATE_START_DONE);
4766                         break;
4767
4768                 case RESET_KIND_SHUTDOWN:
4769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4770                                       DRV_STATE_UNLOAD_DONE);
4771                         break;
4772
4773                 default:
4774                         break;
4775                 };
4776         }
4777 }
4778
4779 /* tp->lock is held. */
4780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4781 {
4782         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4783                 switch (kind) {
4784                 case RESET_KIND_INIT:
4785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4786                                       DRV_STATE_START);
4787                         break;
4788
4789                 case RESET_KIND_SHUTDOWN:
4790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4791                                       DRV_STATE_UNLOAD);
4792                         break;
4793
4794                 case RESET_KIND_SUSPEND:
4795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4796                                       DRV_STATE_SUSPEND);
4797                         break;
4798
4799                 default:
4800                         break;
4801                 };
4802         }
4803 }
4804
4805 static int tg3_poll_fw(struct tg3 *tp)
4806 {
4807         int i;
4808         u32 val;
4809
4810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4811                 /* Wait up to 20ms for init done. */
4812                 for (i = 0; i < 200; i++) {
4813                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4814                                 return 0;
4815                         udelay(100);
4816                 }
4817                 return -ENODEV;
4818         }
4819
4820         /* Wait for firmware initialization to complete. */
4821         for (i = 0; i < 100000; i++) {
4822                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4823                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4824                         break;
4825                 udelay(10);
4826         }
4827
4828         /* Chip might not be fitted with firmware.  Some Sun onboard
4829          * parts are configured like that.  So don't signal the timeout
4830          * of the above loop as an error, but do report the lack of
4831          * running firmware once.
4832          */
4833         if (i >= 100000 &&
4834             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4835                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4836
4837                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4838                        tp->dev->name);
4839         }
4840
4841         return 0;
4842 }
4843
4844 /* Save PCI command register before chip reset */
4845 static void tg3_save_pci_state(struct tg3 *tp)
4846 {
4847         u32 val;
4848
4849         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
4850         tp->pci_cmd = val;
4851 }
4852
/* Restore PCI state after chip reset: re-enable indirect register
 * access, rewrite the command register saved by tg3_save_pci_state(),
 * clear PCI-X relaxed ordering, and re-enable MSI on 5780-class parts
 * (chip reset clears the MSI enable bit there).
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;	/* 5704 A0 in PCI-X mode only */
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved before the reset. */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI in the chip's own interrupt block. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
4901
4902 static void tg3_stop_fw(struct tg3 *);
4903
/* Perform a core-clock reset of the chip, preserving PCI config state
 * around it and re-probing the firmware/ASF state afterwards.  The
 * exact ordering of register accesses below is chip-errata driven --
 * do not reorder.  Returns 0 or the error from tg3_poll_fw().
 * tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIE-specific workaround registers/bits.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Config space is valid again; irq handler may resume access. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5091
5092 /* tp->lock is held. */
5093 static void tg3_stop_fw(struct tg3 *tp)
5094 {
5095         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5096                 u32 val;
5097                 int i;
5098
5099                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5100                 val = tr32(GRC_RX_CPU_EVENT);
5101                 val |= (1 << 14);
5102                 tw32(GRC_RX_CPU_EVENT, val);
5103
5104                 /* Wait for RX cpu to ACK the event.  */
5105                 for (i = 0; i < 100; i++) {
5106                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5107                                 break;
5108                         udelay(1);
5109                 }
5110         }
5111 }
5112
/* Fully halt the chip: pause the ASF firmware, signal the impending
 * reset, abort all DMA activity, reset the core, then signal reset
 * completion.  Returns the result of tg3_chip_reset().
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5133
5134 #define TG3_FW_RELEASE_MAJOR    0x0
5135 #define TG3_FW_RELASE_MINOR     0x0
5136 #define TG3_FW_RELEASE_FIX      0x0
5137 #define TG3_FW_START_ADDR       0x08000000
5138 #define TG3_FW_TEXT_ADDR        0x08000000
5139 #define TG3_FW_TEXT_LEN         0x9c0
5140 #define TG3_FW_RODATA_ADDR      0x080009c0
5141 #define TG3_FW_RODATA_LEN       0x60
5142 #define TG3_FW_DATA_ADDR        0x08000a40
5143 #define TG3_FW_DATA_LEN         0x20
5144 #define TG3_FW_SBSS_ADDR        0x08000a60
5145 #define TG3_FW_SBSS_LEN         0xc
5146 #define TG3_FW_BSS_ADDR         0x08000a70
5147 #define TG3_FW_BSS_LEN          0x10
5148
/* Firmware .text segment (TG3_FW_TEXT_LEN bytes of instruction words)
 * loaded into on-chip CPU scratch memory.  Opaque binary data derived
 * from proprietary source (see file header) -- do not edit by hand.
 * NOTE(review): consumed via struct fw_info / tg3_load_firmware_cpu().
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5242
/* Firmware .rodata segment (TG3_FW_RODATA_LEN bytes).  The words
 * appear to encode short ASCII message strings used by the firmware.
 * Opaque binary data -- do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5250
5251 #if 0 /* All zeros, don't eat up space with it. */
5252 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5253         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5254         0x00000000, 0x00000000, 0x00000000, 0x00000000
5255 };
5256 #endif
5257
5258 #define RX_CPU_SCRATCH_BASE     0x30000
5259 #define RX_CPU_SCRATCH_SIZE     0x04000
5260 #define TX_CPU_SCRATCH_BASE     0x34000
5261 #define TX_CPU_SCRATCH_SIZE     0x04000
5262
5263 /* tp->lock is held. */
5264 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5265 {
5266         int i;
5267
5268         BUG_ON(offset == TX_CPU_BASE &&
5269             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5270
5271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5272                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5273
5274                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5275                 return 0;
5276         }
5277         if (offset == RX_CPU_BASE) {
5278                 for (i = 0; i < 10000; i++) {
5279                         tw32(offset + CPU_STATE, 0xffffffff);
5280                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5281                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5282                                 break;
5283                 }
5284
5285                 tw32(offset + CPU_STATE, 0xffffffff);
5286                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5287                 udelay(10);
5288         } else {
5289                 for (i = 0; i < 10000; i++) {
5290                         tw32(offset + CPU_STATE, 0xffffffff);
5291                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5292                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5293                                 break;
5294                 }
5295         }
5296
5297         if (i >= 10000) {
5298                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5299                        "and %s CPU\n",
5300                        tp->dev->name,
5301                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5302                 return -ENODEV;
5303         }
5304
5305         /* Clear firmware's nvram arbitration. */
5306         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5307                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5308         return 0;
5309 }
5310
/* Describes one firmware image for tg3_load_firmware_cpu(): the
 * .text/.rodata/.data segments with their load addresses and byte
 * lengths.  A NULL *_data pointer means the segment is zero-filled
 * (see the text_data ? ... : 0 pattern in the loader).
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5322
5323 /* tp->lock is held. */
5324 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5325                                  int cpu_scratch_size, struct fw_info *info)
5326 {
5327         int err, lock_err, i;
5328         void (*write_op)(struct tg3 *, u32, u32);
5329
5330         if (cpu_base == TX_CPU_BASE &&
5331             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5332                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5333                        "TX cpu firmware on %s which is 5705.\n",
5334                        tp->dev->name);
5335                 return -EINVAL;
5336         }
5337
5338         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5339                 write_op = tg3_write_mem;
5340         else
5341                 write_op = tg3_write_indirect_reg32;
5342
5343         /* It is possible that bootcode is still loading at this point.
5344          * Get the nvram lock first before halting the cpu.
5345          */
5346         lock_err = tg3_nvram_lock(tp);
5347         err = tg3_halt_cpu(tp, cpu_base);
5348         if (!lock_err)
5349                 tg3_nvram_unlock(tp);
5350         if (err)
5351                 goto out;
5352
5353         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5354                 write_op(tp, cpu_scratch_base + i, 0);
5355         tw32(cpu_base + CPU_STATE, 0xffffffff);
5356         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5357         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5358                 write_op(tp, (cpu_scratch_base +
5359                               (info->text_base & 0xffff) +
5360                               (i * sizeof(u32))),
5361                          (info->text_data ?
5362                           info->text_data[i] : 0));
5363         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5364                 write_op(tp, (cpu_scratch_base +
5365                               (info->rodata_base & 0xffff) +
5366                               (i * sizeof(u32))),
5367                          (info->rodata_data ?
5368                           info->rodata_data[i] : 0));
5369         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5370                 write_op(tp, (cpu_scratch_base +
5371                               (info->data_base & 0xffff) +
5372                               (i * sizeof(u32))),
5373                          (info->data_data ?
5374                           info->data_data[i] : 0));
5375
5376         err = 0;
5377
5378 out:
5379         return err;
5380 }
5381
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both the RX and TX cpus
 * and start the RX cpu at the firmware entry point.
 * Returns 0 on success, -ENODEV if the RX cpu PC cannot be set, or an
 * error from tg3_load_firmware_cpu().
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	/* data_data is NULL, so tg3_load_firmware_cpu() zero-fills the
	 * .data section.
	 */
	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;

	/* The same image goes into both cpu scratch areas. */
	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Poll until the PC reads back as the entry point; each retry
	 * clears cpu state, re-halts the cpu and rewrites the PC.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX cpu from halt (mode 0) so it starts running. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
5434
5435
/* Section layout of the standard TSO firmware image (release 1.6.0).
 * *_ADDR values are the firmware's link addresses; *_LEN values are
 * section sizes in bytes.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a historical
 * typo; the name is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5450
/* .text section of the TSO firmware image, as 32-bit words.  Derived
 * from Broadcom's proprietary firmware build (see the copyright notice
 * at the top of this file) -- machine-generated data, do not edit.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
5737
/* .rodata section of the TSO firmware image.  The words decode to
 * ASCII tags used by the firmware, e.g. "MainCpuB", "MainCpuA",
 * "stkoffld", "SwEvent0" and "fatalErr".
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5745
/* .data section of the TSO firmware image.  The non-zero words decode
 * to the ASCII version tag "stkoffld_v1.6.0", matching the 1.6.0
 * release numbers above.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5751
/* 5705 needs a special version of the TSO firmware.  */
/* Section layout of the 5705 TSO firmware image (release 1.2.0).
 * *_ADDR values are the firmware's link addresses; *_LEN values are
 * section sizes in bytes.
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is a historical
 * typo; the name is kept as-is in case it is referenced elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5767
/* 5705 TSO firmware instruction image: raw machine-code words that
 * tg3_load_tso_firmware() downloads into NIC SRAM at
 * TG3_TSO5_FW_TEXT_ADDR.  Opaque firmware payload — do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5926
/* Read-only data for the 5705 TSO firmware.  The ASCII in these words
 * spells NUL-padded tags used by the image: "MainCpuB", "MainCpuA",
 * "stkoffld", "fatalErr".  Opaque firmware payload — do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5933
/* Initialized .data for the 5705 TSO firmware; the ASCII in these words
 * spells the embedded "stkoffld_v1.2.0" version tag.  Opaque firmware
 * payload — do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5938
5939 /* tp->lock is held. */
5940 static int tg3_load_tso_firmware(struct tg3 *tp)
5941 {
5942         struct fw_info info;
5943         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5944         int err, i;
5945
5946         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5947                 return 0;
5948
5949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5950                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5951                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5952                 info.text_data = &tg3Tso5FwText[0];
5953                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5954                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5955                 info.rodata_data = &tg3Tso5FwRodata[0];
5956                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5957                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5958                 info.data_data = &tg3Tso5FwData[0];
5959                 cpu_base = RX_CPU_BASE;
5960                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5961                 cpu_scratch_size = (info.text_len +
5962                                     info.rodata_len +
5963                                     info.data_len +
5964                                     TG3_TSO5_FW_SBSS_LEN +
5965                                     TG3_TSO5_FW_BSS_LEN);
5966         } else {
5967                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5968                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5969                 info.text_data = &tg3TsoFwText[0];
5970                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5971                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5972                 info.rodata_data = &tg3TsoFwRodata[0];
5973                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5974                 info.data_len = TG3_TSO_FW_DATA_LEN;
5975                 info.data_data = &tg3TsoFwData[0];
5976                 cpu_base = TX_CPU_BASE;
5977                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5978                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5979         }
5980
5981         err = tg3_load_firmware_cpu(tp, cpu_base,
5982                                     cpu_scratch_base, cpu_scratch_size,
5983                                     &info);
5984         if (err)
5985                 return err;
5986
5987         /* Now startup the cpu. */
5988         tw32(cpu_base + CPU_STATE, 0xffffffff);
5989         tw32_f(cpu_base + CPU_PC,    info.text_base);
5990
5991         for (i = 0; i < 5; i++) {
5992                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5993                         break;
5994                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5995                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5996                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5997                 udelay(1000);
5998         }
5999         if (i >= 5) {
6000                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6001                        "to set CPU PC, is %08x should be %08x\n",
6002                        tp->dev->name, tr32(cpu_base + CPU_PC),
6003                        info.text_base);
6004                 return -ENODEV;
6005         }
6006         tw32(cpu_base + CPU_STATE, 0xffffffff);
6007         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6008         return 0;
6009 }
6010
6011
6012 /* tp->lock is held. */
6013 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6014 {
6015         u32 addr_high, addr_low;
6016         int i;
6017
6018         addr_high = ((tp->dev->dev_addr[0] << 8) |
6019                      tp->dev->dev_addr[1]);
6020         addr_low = ((tp->dev->dev_addr[2] << 24) |
6021                     (tp->dev->dev_addr[3] << 16) |
6022                     (tp->dev->dev_addr[4] <<  8) |
6023                     (tp->dev->dev_addr[5] <<  0));
6024         for (i = 0; i < 4; i++) {
6025                 if (i == 1 && skip_mac_1)
6026                         continue;
6027                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6028                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6029         }
6030
6031         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6032             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6033                 for (i = 0; i < 12; i++) {
6034                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6035                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6036                 }
6037         }
6038
6039         addr_high = (tp->dev->dev_addr[0] +
6040                      tp->dev->dev_addr[1] +
6041                      tp->dev->dev_addr[2] +
6042                      tp->dev->dev_addr[3] +
6043                      tp->dev->dev_addr[4] +
6044                      tp->dev->dev_addr[5]) &
6045                 TX_BACKOFF_SEED_MASK;
6046         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6047 }
6048
6049 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6050 {
6051         struct tg3 *tp = netdev_priv(dev);
6052         struct sockaddr *addr = p;
6053         int err = 0, skip_mac_1 = 0;
6054
6055         if (!is_valid_ether_addr(addr->sa_data))
6056                 return -EINVAL;
6057
6058         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6059
6060         if (!netif_running(dev))
6061                 return 0;
6062
6063         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6064                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6065
6066                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6067                 addr0_low = tr32(MAC_ADDR_0_LOW);
6068                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6069                 addr1_low = tr32(MAC_ADDR_1_LOW);
6070
6071                 /* Skip MAC addr 1 if ASF is using it. */
6072                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6073                     !(addr1_high == 0 && addr1_low == 0))
6074                         skip_mac_1 = 1;
6075         }
6076         spin_lock_bh(&tp->lock);
6077         __tg3_set_mac_addr(tp, skip_mac_1);
6078         spin_unlock_bh(&tp->lock);
6079
6080         return err;
6081 }
6082
6083 /* tp->lock is held. */
6084 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6085                            dma_addr_t mapping, u32 maxlen_flags,
6086                            u32 nic_addr)
6087 {
6088         tg3_write_mem(tp,
6089                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6090                       ((u64) mapping >> 32));
6091         tg3_write_mem(tp,
6092                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6093                       ((u64) mapping & 0xffffffff));
6094         tg3_write_mem(tp,
6095                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6096                        maxlen_flags);
6097
6098         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6099                 tg3_write_mem(tp,
6100                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6101                               nic_addr);
6102 }
6103
6104 static void __tg3_set_rx_mode(struct net_device *);
6105 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6106 {
6107         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6108         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6109         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6110         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6111         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6112                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6113                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6114         }
6115         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6116         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6117         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6118                 u32 val = ec->stats_block_coalesce_usecs;
6119
6120                 if (!netif_carrier_ok(tp->dev))
6121                         val = 0;
6122
6123                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6124         }
6125 }
6126
6127 /* tp->lock is held. */
6128 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6129 {
6130         u32 val, rdmac_mode;
6131         int i, err, limit;
6132
6133         tg3_disable_ints(tp);
6134
6135         tg3_stop_fw(tp);
6136
6137         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6138
6139         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6140                 tg3_abort_hw(tp, 1);
6141         }
6142
6143         if (reset_phy)
6144                 tg3_phy_reset(tp);
6145
6146         err = tg3_chip_reset(tp);
6147         if (err)
6148                 return err;
6149
6150         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6151
6152         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6153                 val = tr32(TG3_CPMU_CTRL);
6154                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6155                 tw32(TG3_CPMU_CTRL, val);
6156         }
6157
6158         /* This works around an issue with Athlon chipsets on
6159          * B3 tigon3 silicon.  This bit has no effect on any
6160          * other revision.  But do not set this on PCI Express
6161          * chips and don't even touch the clocks if the CPMU is present.
6162          */
6163         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6164                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6165                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6166                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6167         }
6168
6169         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6170             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6171                 val = tr32(TG3PCI_PCISTATE);
6172                 val |= PCISTATE_RETRY_SAME_DMA;
6173                 tw32(TG3PCI_PCISTATE, val);
6174         }
6175
6176         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6177                 /* Enable some hw fixes.  */
6178                 val = tr32(TG3PCI_MSI_DATA);
6179                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6180                 tw32(TG3PCI_MSI_DATA, val);
6181         }
6182
6183         /* Descriptor ring init may make accesses to the
6184          * NIC SRAM area to setup the TX descriptors, so we
6185          * can only do this after the hardware has been
6186          * successfully reset.
6187          */
6188         err = tg3_init_rings(tp);
6189         if (err)
6190                 return err;
6191
6192         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) {
6193                 /* This value is determined during the probe time DMA
6194                  * engine test, tg3_test_dma.
6195                  */
6196                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6197         }
6198
6199         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6200                           GRC_MODE_4X_NIC_SEND_RINGS |
6201                           GRC_MODE_NO_TX_PHDR_CSUM |
6202                           GRC_MODE_NO_RX_PHDR_CSUM);
6203         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6204
6205         /* Pseudo-header checksum is done by hardware logic and not
6206          * the offload processers, so make the chip do the pseudo-
6207          * header checksums on receive.  For transmit it is more
6208          * convenient to do the pseudo-header checksum in software
6209          * as Linux does that on transmit for us in all cases.
6210          */
6211         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6212
6213         tw32(GRC_MODE,
6214              tp->grc_mode |
6215              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6216
6217         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6218         val = tr32(GRC_MISC_CFG);
6219         val &= ~0xff;
6220         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6221         tw32(GRC_MISC_CFG, val);
6222
6223         /* Initialize MBUF/DESC pool. */
6224         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6225                 /* Do nothing.  */
6226         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6227                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6228                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6229                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6230                 else
6231                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6232                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6233                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6234         }
6235         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6236                 int fw_len;
6237
6238                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6239                           TG3_TSO5_FW_RODATA_LEN +
6240                           TG3_TSO5_FW_DATA_LEN +
6241                           TG3_TSO5_FW_SBSS_LEN +
6242                           TG3_TSO5_FW_BSS_LEN);
6243                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6244                 tw32(BUFMGR_MB_POOL_ADDR,
6245                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6246                 tw32(BUFMGR_MB_POOL_SIZE,
6247                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6248         }
6249
6250         if (tp->dev->mtu <= ETH_DATA_LEN) {
6251                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6252                      tp->bufmgr_config.mbuf_read_dma_low_water);
6253                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6254                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6255                 tw32(BUFMGR_MB_HIGH_WATER,
6256                      tp->bufmgr_config.mbuf_high_water);
6257         } else {
6258                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6259                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6260                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6261                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6262                 tw32(BUFMGR_MB_HIGH_WATER,
6263                      tp->bufmgr_config.mbuf_high_water_jumbo);
6264         }
6265         tw32(BUFMGR_DMA_LOW_WATER,
6266              tp->bufmgr_config.dma_low_water);
6267         tw32(BUFMGR_DMA_HIGH_WATER,
6268              tp->bufmgr_config.dma_high_water);
6269
6270         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6271         for (i = 0; i < 2000; i++) {
6272                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6273                         break;
6274                 udelay(10);
6275         }
6276         if (i >= 2000) {
6277                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6278                        tp->dev->name);
6279                 return -ENODEV;
6280         }
6281
6282         /* Setup replenish threshold. */
6283         val = tp->rx_pending / 8;
6284         if (val == 0)
6285                 val = 1;
6286         else if (val > tp->rx_std_max_post)
6287                 val = tp->rx_std_max_post;
6288         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6289                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6290                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6291
6292                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6293                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6294         }
6295
6296         tw32(RCVBDI_STD_THRESH, val);
6297
6298         /* Initialize TG3_BDINFO's at:
6299          *  RCVDBDI_STD_BD:     standard eth size rx ring
6300          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6301          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6302          *
6303          * like so:
6304          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6305          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6306          *                              ring attribute flags
6307          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6308          *
6309          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6310          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6311          *
6312          * The size of each ring is fixed in the firmware, but the location is
6313          * configurable.
6314          */
6315         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6316              ((u64) tp->rx_std_mapping >> 32));
6317         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6318              ((u64) tp->rx_std_mapping & 0xffffffff));
6319         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6320              NIC_SRAM_RX_BUFFER_DESC);
6321
6322         /* Don't even try to program the JUMBO/MINI buffer descriptor
6323          * configs on 5705.
6324          */
6325         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6326                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6327                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6328         } else {
6329                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6330                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6331
6332                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6333                      BDINFO_FLAGS_DISABLED);
6334
6335                 /* Setup replenish threshold. */
6336                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6337
6338                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6339                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6340                              ((u64) tp->rx_jumbo_mapping >> 32));
6341                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6342                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6343                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6344                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6345                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6346                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6347                 } else {
6348                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6349                              BDINFO_FLAGS_DISABLED);
6350                 }
6351
6352         }
6353
6354         /* There is only one send ring on 5705/5750, no need to explicitly
6355          * disable the others.
6356          */
6357         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6358                 /* Clear out send RCB ring in SRAM. */
6359                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6360                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6361                                       BDINFO_FLAGS_DISABLED);
6362         }
6363
6364         tp->tx_prod = 0;
6365         tp->tx_cons = 0;
6366         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6367         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6368
6369         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6370                        tp->tx_desc_mapping,
6371                        (TG3_TX_RING_SIZE <<
6372                         BDINFO_FLAGS_MAXLEN_SHIFT),
6373                        NIC_SRAM_TX_BUFFER_DESC);
6374
6375         /* There is only one receive return ring on 5705/5750, no need
6376          * to explicitly disable the others.
6377          */
6378         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6379                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6380                      i += TG3_BDINFO_SIZE) {
6381                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6382                                       BDINFO_FLAGS_DISABLED);
6383                 }
6384         }
6385
6386         tp->rx_rcb_ptr = 0;
6387         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6388
6389         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6390                        tp->rx_rcb_mapping,
6391                        (TG3_RX_RCB_RING_SIZE(tp) <<
6392                         BDINFO_FLAGS_MAXLEN_SHIFT),
6393                        0);
6394
6395         tp->rx_std_ptr = tp->rx_pending;
6396         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6397                      tp->rx_std_ptr);
6398
6399         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6400                                                 tp->rx_jumbo_pending : 0;
6401         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6402                      tp->rx_jumbo_ptr);
6403
6404         /* Initialize MAC address and backoff seed. */
6405         __tg3_set_mac_addr(tp, 0);
6406
6407         /* MTU + ethernet header + FCS + optional VLAN tag */
6408         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6409
6410         /* The slot time is changed by tg3_setup_phy if we
6411          * run at gigabit with half duplex.
6412          */
6413         tw32(MAC_TX_LENGTHS,
6414              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6415              (6 << TX_LENGTHS_IPG_SHIFT) |
6416              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6417
6418         /* Receive rules. */
6419         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6420         tw32(RCVLPC_CONFIG, 0x0181);
6421
6422         /* Calculate RDMAC_MODE setting early, we need it to determine
6423          * the RCVLPC_STATE_ENABLE mask.
6424          */
6425         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6426                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6427                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6428                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6429                       RDMAC_MODE_LNGREAD_ENAB);
6430
6431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6432                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6433                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6434                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6435
6436         /* If statement applies to 5705 and 5750 PCI devices only */
6437         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6438              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6439             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6440                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6441                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6442                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6443                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6444                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6445                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6446                 }
6447         }
6448
6449         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6450                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6451
6452         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6453                 rdmac_mode |= (1 << 27);
6454
6455         /* Receive/send statistics. */
6456         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6457                 val = tr32(RCVLPC_STATS_ENABLE);
6458                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6459                 tw32(RCVLPC_STATS_ENABLE, val);
6460         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6461                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6462                 val = tr32(RCVLPC_STATS_ENABLE);
6463                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6464                 tw32(RCVLPC_STATS_ENABLE, val);
6465         } else {
6466                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6467         }
6468         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6469         tw32(SNDDATAI_STATSENAB, 0xffffff);
6470         tw32(SNDDATAI_STATSCTRL,
6471              (SNDDATAI_SCTRL_ENABLE |
6472               SNDDATAI_SCTRL_FASTUPD));
6473
6474         /* Setup host coalescing engine. */
6475         tw32(HOSTCC_MODE, 0);
6476         for (i = 0; i < 2000; i++) {
6477                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6478                         break;
6479                 udelay(10);
6480         }
6481
6482         __tg3_set_coalesce(tp, &tp->coal);
6483
6484         /* set status block DMA address */
6485         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6486              ((u64) tp->status_mapping >> 32));
6487         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6488              ((u64) tp->status_mapping & 0xffffffff));
6489
6490         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6491                 /* Status/statistics block address.  See tg3_timer,
6492                  * the tg3_periodic_fetch_stats call there, and
6493                  * tg3_get_stats to see how this works for 5705/5750 chips.
6494                  */
6495                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6496                      ((u64) tp->stats_mapping >> 32));
6497                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6498                      ((u64) tp->stats_mapping & 0xffffffff));
6499                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6500                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6501         }
6502
6503         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6504
6505         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6506         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6507         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6508                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6509
6510         /* Clear statistics/status block in chip, and status block in ram. */
6511         for (i = NIC_SRAM_STATS_BLK;
6512              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6513              i += sizeof(u32)) {
6514                 tg3_write_mem(tp, i, 0);
6515                 udelay(40);
6516         }
6517         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6518
6519         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6520                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6521                 /* reset to prevent losing 1st rx packet intermittently */
6522                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6523                 udelay(10);
6524         }
6525
6526         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6527                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6528         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6529             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6530             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6531                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6532         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6533         udelay(40);
6534
6535         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6536          * If TG3_FLG2_IS_NIC is zero, we should read the
6537          * register to preserve the GPIO settings for LOMs. The GPIOs,
6538          * whether used as inputs or outputs, are set by boot code after
6539          * reset.
6540          */
6541         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6542                 u32 gpio_mask;
6543
6544                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6545                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6546                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6547
6548                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6549                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6550                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6551
6552                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6553                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6554
6555                 tp->grc_local_ctrl &= ~gpio_mask;
6556                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6557
6558                 /* GPIO1 must be driven high for eeprom write protect */
6559                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6560                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6561                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6562         }
6563         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6564         udelay(100);
6565
6566         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6567         tp->last_tag = 0;
6568
6569         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6570                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6571                 udelay(40);
6572         }
6573
6574         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6575                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6576                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6577                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6578                WDMAC_MODE_LNGREAD_ENAB);
6579
6580         /* If statement applies to 5705 and 5750 PCI devices only */
6581         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6582              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6583             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6584                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6585                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6586                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6587                         /* nothing */
6588                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6589                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6590                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6591                         val |= WDMAC_MODE_RX_ACCEL;
6592                 }
6593         }
6594
6595         /* Enable host coalescing bug fix */
6596         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6597             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6598             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6599                 val |= (1 << 29);
6600
6601         tw32_f(WDMAC_MODE, val);
6602         udelay(40);
6603
6604         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6605                 u16 pcix_cmd;
6606
6607                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6608                                      &pcix_cmd);
6609                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6610                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6611                         pcix_cmd |= PCI_X_CMD_READ_2K;
6612                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6613                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6614                         pcix_cmd |= PCI_X_CMD_READ_2K;
6615                 }
6616                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6617                                       pcix_cmd);
6618         }
6619
6620         tw32_f(RDMAC_MODE, rdmac_mode);
6621         udelay(40);
6622
6623         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6624         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6625                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6626         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6627         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6628         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6629         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6630         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6631         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6632                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6633         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6634         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6635
6636         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6637                 err = tg3_load_5701_a0_firmware_fix(tp);
6638                 if (err)
6639                         return err;
6640         }
6641
6642         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6643                 err = tg3_load_tso_firmware(tp);
6644                 if (err)
6645                         return err;
6646         }
6647
6648         tp->tx_mode = TX_MODE_ENABLE;
6649         tw32_f(MAC_TX_MODE, tp->tx_mode);
6650         udelay(100);
6651
6652         tp->rx_mode = RX_MODE_ENABLE;
6653         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6654                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6655
6656         tw32_f(MAC_RX_MODE, tp->rx_mode);
6657         udelay(10);
6658
6659         if (tp->link_config.phy_is_low_power) {
6660                 tp->link_config.phy_is_low_power = 0;
6661                 tp->link_config.speed = tp->link_config.orig_speed;
6662                 tp->link_config.duplex = tp->link_config.orig_duplex;
6663                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6664         }
6665
6666         tp->mi_mode = MAC_MI_MODE_BASE;
6667         tw32_f(MAC_MI_MODE, tp->mi_mode);
6668         udelay(80);
6669
6670         tw32(MAC_LED_CTRL, tp->led_ctrl);
6671
6672         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6673         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6674                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6675                 udelay(10);
6676         }
6677         tw32_f(MAC_RX_MODE, tp->rx_mode);
6678         udelay(10);
6679
6680         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6681                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6682                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6683                         /* Set drive transmission level to 1.2V  */
6684                         /* only if the signal pre-emphasis bit is not set  */
6685                         val = tr32(MAC_SERDES_CFG);
6686                         val &= 0xfffff000;
6687                         val |= 0x880;
6688                         tw32(MAC_SERDES_CFG, val);
6689                 }
6690                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6691                         tw32(MAC_SERDES_CFG, 0x616000);
6692         }
6693
6694         /* Prevent chip from dropping frames when flow control
6695          * is enabled.
6696          */
6697         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6698
6699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6700             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6701                 /* Use hardware link auto-negotiation */
6702                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6703         }
6704
6705         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6706             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6707                 u32 tmp;
6708
6709                 tmp = tr32(SERDES_RX_CTRL);
6710                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6711                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6712                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6713                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6714         }
6715
6716         err = tg3_setup_phy(tp, 0);
6717         if (err)
6718                 return err;
6719
6720         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6721             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6722                 u32 tmp;
6723
6724                 /* Clear CRC stats. */
6725                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6726                         tg3_writephy(tp, MII_TG3_TEST1,
6727                                      tmp | MII_TG3_TEST1_CRC_EN);
6728                         tg3_readphy(tp, 0x14, &tmp);
6729                 }
6730         }
6731
6732         __tg3_set_rx_mode(tp->dev);
6733
6734         /* Initialize receive rules. */
6735         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6736         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6737         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6738         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6739
6740         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6741             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6742                 limit = 8;
6743         else
6744                 limit = 16;
6745         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6746                 limit -= 4;
6747         switch (limit) {
6748         case 16:
6749                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6750         case 15:
6751                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6752         case 14:
6753                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6754         case 13:
6755                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6756         case 12:
6757                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6758         case 11:
6759                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6760         case 10:
6761                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6762         case 9:
6763                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6764         case 8:
6765                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6766         case 7:
6767                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6768         case 6:
6769                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6770         case 5:
6771                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6772         case 4:
6773                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6774         case 3:
6775                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6776         case 2:
6777         case 1:
6778
6779         default:
6780                 break;
6781         };
6782
6783         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6784
6785         return 0;
6786 }
6787
6788 /* Called at device open time to get the chip ready for
6789  * packet processing.  Invoked with tp->lock held.
6790  */
6791 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6792 {
6793         int err;
6794
6795         /* Force the chip into D0. */
6796         err = tg3_set_power_state(tp, PCI_D0);
6797         if (err)
6798                 goto out;
6799
6800         tg3_switch_clocks(tp);
6801
6802         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6803
6804         err = tg3_reset_hw(tp, reset_phy);
6805
6806 out:
6807         return err;
6808 }
6809
/* Fold the 32-bit statistics register REG into the 64-bit (high/low)
 * software counter PSTAT.  If the low word wraps around after the add,
 * the carry is propagated into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6816
/* Accumulate the chip's 32-bit MAC TX/RX and receive-list-placer
 * statistics registers into the in-memory tg3_hw_stats block.
 * Called once per second from tg3_timer on 5705+ chips (which do not
 * get the SRAM statistics block set up in tg3_reset_hw).  Does nothing
 * while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC counters. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC counters. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placer drop/error counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6857
/* Driver watchdog, re-armed every tp->timer_offset jiffies.  Under
 * tp->lock it works around the non-tagged status-block race, polls
 * link state once per second (timer_counter), and sends the ASF
 * heartbeat every asf_multiplier ticks.  If the chip looks hung it
 * hands off to the reset task instead of re-arming itself.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupt sync in progress (presumably tg3_irq_quiesce):
         * skip all hardware access this tick but keep the timer alive. */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but possibly unserviced:
                         * poke the chip to raise the interrupt again. */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* No pending update: force an immediate status
                         * block DMA so nothing gets stuck. */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write-DMA engine dropped its enable bit: the
                         * chip appears hung.  Flag the reset task to
                         * restart this timer, reset from process
                         * context, and do NOT re-arm here. */
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Detect link changes by reading MAC_STATUS
                         * rather than relying on a link interrupt. */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Carrier up but the link state changed, or
                         * carrier down while PCS sync / signal detect is
                         * present: re-evaluate the link either way. */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Briefly clear the port mode
                                         * bits, presumably to restart
                                         * the SERDES link state machine
                                         * before re-running setup. */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* Set bit 14 of GRC_RX_CPU_EVENT — presumably
                         * the firmware-command doorbell; confirm against
                         * the register definitions. */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
6977
6978 static int tg3_request_irq(struct tg3 *tp)
6979 {
6980         irq_handler_t fn;
6981         unsigned long flags;
6982         struct net_device *dev = tp->dev;
6983
6984         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6985                 fn = tg3_msi;
6986                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6987                         fn = tg3_msi_1shot;
6988                 flags = IRQF_SAMPLE_RANDOM;
6989         } else {
6990                 fn = tg3_interrupt;
6991                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6992                         fn = tg3_interrupt_tagged;
6993                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6994         }
6995         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6996 }
6997
/* Verify that the board can actually deliver an interrupt: swap in the
 * minimal tg3_test_isr handler, force a status-block update with
 * HOSTCC_MODE_NOW, and poll for up to ~50ms for evidence that the
 * interrupt fired.  The normal handler is restored before returning.
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV if the
 * device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Replace the production ISR with the test ISR. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force an immediate coalescing cycle so the chip raises an
         * interrupt right away. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* A non-zero interrupt mailbox or the masked-PCI-int
                 * bit is taken as proof the interrupt was delivered
                 * (presumably set by tg3_test_isr — confirm there). */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Put the normal interrupt handler back. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
7051
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other failure from tg3_test_interrupt
 * (or from re-requesting the IRQ / re-initializing the chip) is
 * returned as-is.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (re-enables SERR). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        /* Free the MSI vector before disabling MSI on the device. */
        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* tg3_request_irq now selects the INTx handler. */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
7112
/* dev->open hook: power the chip up, allocate descriptor/status
 * memory, set up the interrupt line (preferring MSI where the chip
 * supports it), initialize the hardware and arm the periodic timer,
 * then start the transmit queue.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Bring the device to full power before touching the hardware. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo the MSI enable and release the DMA memory. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* With tagged status the timer can tick at 1 Hz;
		 * otherwise poll at 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI delivery really works; tg3_test_msi()
		 * itself falls back to INTx on failure, so only a
		 * non-recoverable error reaches this cleanup path.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7252
#if 0
/* Debugging aid (compiled out): dump the PCI state, the mode/status
 * registers of every major MAC and DMA block, the BD control blocks,
 * the SRAM ring control blocks, the host status/statistics blocks,
 * the mailboxes and the first few NIC-side TX/RX descriptors to the
 * console.  Callers must ensure the chip is accessible.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* Ring control blocks kept in NIC SRAM. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7480
7481 static struct net_device_stats *tg3_get_stats(struct net_device *);
7482 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7483
/* dev->close hook: stop NAPI, the pending reset work, the queue and
 * the timer, halt the hardware, release the IRQ/MSI and descriptor
 * memory, snapshot the final counters, and put the chip in D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure a queued reset_task cannot run during teardown. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve the accumulated counters: the hardware stats block
	 * is freed below, and tg3_get_stats()/tg3_get_estats() return
	 * these saved copies while the device is down.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7527
7528 static inline unsigned long get_stat64(tg3_stat64_t *val)
7529 {
7530         unsigned long ret;
7531
7532 #if (BITS_PER_LONG == 32)
7533         ret = val->low;
7534 #else
7535         ret = ((u64)val->high << 32) | ((u64)val->low);
7536 #endif
7537         return ret;
7538 }
7539
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 chips with a copper PHY the count is taken from the
 * PHY itself: counting is enabled via MII_TG3_TEST1 and the counter
 * register (0x14) is read under tp->lock.  The reading is summed
 * into tp->phy_crc_errors (the PHY counter appears to be
 * clear-on-read -- hence the running total; TODO confirm against the
 * PHY datasheet).  All other chips report FCS errors through the
 * hardware statistics block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* 0x14 holds the error count once CRC_EN is set. */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7565
/* Accumulate one ethtool counter: the value saved when the device was
 * last closed (old_estats) plus the live hardware statistics entry.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh and return tp->estats from the hardware statistics block.
 * While the device is down (no hw_stats) the snapshot saved by
 * tg3_close() is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7657
/* dev->get_stats hook: build tp->net_stats by adding the live
 * hardware statistics to the counters saved at the last close.
 * While the device is down (no hw_stats) the saved snapshot is
 * returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The hardware splits packet counts by cast type. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7717
7718 static inline u32 calc_crc(unsigned char *buf, int len)
7719 {
7720         u32 reg;
7721         u32 tmp;
7722         int j, k;
7723
7724         reg = 0xffffffff;
7725
7726         for (j = 0; j < len; j++) {
7727                 reg ^= buf[j];
7728
7729                 for (k = 0; k < 8; k++) {
7730                         tmp = reg & 0x01;
7731
7732                         reg >>= 1;
7733
7734                         if (tmp) {
7735                                 reg ^= 0xedb88320;
7736                         }
7737                 }
7738         }
7739
7740         return ~reg;
7741 }
7742
7743 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7744 {
7745         /* accept or reject all multicast frames */
7746         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7747         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7748         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7749         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7750 }
7751
/* Apply the device's RX filtering policy: promiscuous mode, VLAN tag
 * stripping, and the multicast hash filter.  Caller must hold the
 * full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The low 7 bits of the inverted address CRC
			 * select one of 128 hash-filter bits: bits
			 * 6:5 pick the register, bits 4:0 the bit.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7815
/* dev->set_multicast_list hook: reapply the RX filtering policy
 * under the full lock.  A device that is down is configured when it
 * is next opened, so there is nothing to do in that case.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7827
7828 #define TG3_REGDUMP_LEN         (32 * 1024)
7829
/* ethtool get_regs_len: size of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7834
/* ethtool get_regs: dump the register ranges listed below into the
 * caller's TG3_REGDUMP_LEN buffer, each range placed at its register
 * offset (unread areas stay zeroed).  Skipped while the PHY is in
 * low-power mode, when register access is unsafe.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at (base) and read (len) bytes of registers. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only readable when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7907
7908 static int tg3_get_eeprom_len(struct net_device *dev)
7909 {
7910         struct tg3 *tp = netdev_priv(dev);
7911
7912         return tp->nvram_size;
7913 }
7914
7915 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7916 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7917
/* ethtool hook: read eeprom->len bytes of NVRAM starting at eeprom->offset
 * into 'data'.  NVRAM is only word-addressable, so an unaligned head and
 * tail are handled by reading the covering 32-bit word and copying out the
 * wanted bytes.  eeprom->len is updated to the number of bytes actually
 * copied, so a partial result is reported on mid-stream read failure.
 * Returns 0 on success or a negative errno from tg3_nvram_read().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is inaccessible while the chip is in low-power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Convert to a little-endian byte stream before slicing out
		 * the requested bytes. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7979
7980 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7981
/* ethtool hook: write eeprom->len bytes from 'data' to NVRAM at
 * eeprom->offset.  NVRAM writes are word-granular, so when the range is
 * unaligned the bordering words are first read back ('start'/'end'), the
 * request is widened to word boundaries, and a kmalloc'd bounce buffer is
 * assembled so the preserved bytes are rewritten unchanged.
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	/* NVRAM is inaccessible while the chip is in low-power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* The caller must echo back the magic from tg3_get_eeprom(). */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * capture the word whose leading bytes must be preserved. */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * capture the word whose trailing bytes must be preserved. */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Unaligned: build the widened image in a bounce buffer. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8040
8041 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8042 {
8043         struct tg3 *tp = netdev_priv(dev);
8044
8045         cmd->supported = (SUPPORTED_Autoneg);
8046
8047         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8048                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8049                                    SUPPORTED_1000baseT_Full);
8050
8051         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8052                 cmd->supported |= (SUPPORTED_100baseT_Half |
8053                                   SUPPORTED_100baseT_Full |
8054                                   SUPPORTED_10baseT_Half |
8055                                   SUPPORTED_10baseT_Full |
8056                                   SUPPORTED_MII);
8057                 cmd->port = PORT_TP;
8058         } else {
8059                 cmd->supported |= SUPPORTED_FIBRE;
8060                 cmd->port = PORT_FIBRE;
8061         }
8062
8063         cmd->advertising = tp->link_config.advertising;
8064         if (netif_running(dev)) {
8065                 cmd->speed = tp->link_config.active_speed;
8066                 cmd->duplex = tp->link_config.active_duplex;
8067         }
8068         cmd->phy_address = PHY_ADDR;
8069         cmd->transceiver = 0;
8070         cmd->autoneg = tp->link_config.autoneg;
8071         cmd->maxtxpkt = 0;
8072         cmd->maxrxpkt = 0;
8073         return 0;
8074 }
8075
8076 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8077 {
8078         struct tg3 *tp = netdev_priv(dev);
8079
8080         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8081                 /* These are the only valid advertisement bits allowed.  */
8082                 if (cmd->autoneg == AUTONEG_ENABLE &&
8083                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8084                                           ADVERTISED_1000baseT_Full |
8085                                           ADVERTISED_Autoneg |
8086                                           ADVERTISED_FIBRE)))
8087                         return -EINVAL;
8088                 /* Fiber can only do SPEED_1000.  */
8089                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8090                          (cmd->speed != SPEED_1000))
8091                         return -EINVAL;
8092         /* Copper cannot force SPEED_1000.  */
8093         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8094                    (cmd->speed == SPEED_1000))
8095                 return -EINVAL;
8096         else if ((cmd->speed == SPEED_1000) &&
8097                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8098                 return -EINVAL;
8099
8100         tg3_full_lock(tp, 0);
8101
8102         tp->link_config.autoneg = cmd->autoneg;
8103         if (cmd->autoneg == AUTONEG_ENABLE) {
8104                 tp->link_config.advertising = (cmd->advertising |
8105                                               ADVERTISED_Autoneg);
8106                 tp->link_config.speed = SPEED_INVALID;
8107                 tp->link_config.duplex = DUPLEX_INVALID;
8108         } else {
8109                 tp->link_config.advertising = 0;
8110                 tp->link_config.speed = cmd->speed;
8111                 tp->link_config.duplex = cmd->duplex;
8112         }
8113
8114         tp->link_config.orig_speed = tp->link_config.speed;
8115         tp->link_config.orig_duplex = tp->link_config.duplex;
8116         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8117
8118         if (netif_running(dev))
8119                 tg3_setup_phy(tp, 1);
8120
8121         tg3_full_unlock(tp);
8122
8123         return 0;
8124 }
8125
8126 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8127 {
8128         struct tg3 *tp = netdev_priv(dev);
8129
8130         strcpy(info->driver, DRV_MODULE_NAME);
8131         strcpy(info->version, DRV_MODULE_VERSION);
8132         strcpy(info->fw_version, tp->fw_ver);
8133         strcpy(info->bus_info, pci_name(tp->pdev));
8134 }
8135
8136 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8137 {
8138         struct tg3 *tp = netdev_priv(dev);
8139
8140         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8141                 wol->supported = WAKE_MAGIC;
8142         else
8143                 wol->supported = 0;
8144         wol->wolopts = 0;
8145         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8146                 wol->wolopts = WAKE_MAGIC;
8147         memset(&wol->sopass, 0, sizeof(wol->sopass));
8148 }
8149
8150 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8151 {
8152         struct tg3 *tp = netdev_priv(dev);
8153
8154         if (wol->wolopts & ~WAKE_MAGIC)
8155                 return -EINVAL;
8156         if ((wol->wolopts & WAKE_MAGIC) &&
8157             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8158                 return -EINVAL;
8159
8160         spin_lock_bh(&tp->lock);
8161         if (wol->wolopts & WAKE_MAGIC)
8162                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8163         else
8164                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8165         spin_unlock_bh(&tp->lock);
8166
8167         return 0;
8168 }
8169
8170 static u32 tg3_get_msglevel(struct net_device *dev)
8171 {
8172         struct tg3 *tp = netdev_priv(dev);
8173         return tp->msg_enable;
8174 }
8175
8176 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8177 {
8178         struct tg3 *tp = netdev_priv(dev);
8179         tp->msg_enable = value;
8180 }
8181
8182 static int tg3_set_tso(struct net_device *dev, u32 value)
8183 {
8184         struct tg3 *tp = netdev_priv(dev);
8185
8186         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8187                 if (value)
8188                         return -EINVAL;
8189                 return 0;
8190         }
8191         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8192             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8193                 if (value)
8194                         dev->features |= NETIF_F_TSO6;
8195                 else
8196                         dev->features &= ~NETIF_F_TSO6;
8197         }
8198         return ethtool_op_set_tso(dev, value);
8199 }
8200
/* ethtool hook: restart autonegotiation.
 * Only valid on a running, non-serdes (MII PHY) interface, and only when
 * autoneg is enabled (or parallel detection is active).
 * Returns 0 on success, -EAGAIN if down, -EINVAL otherwise.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice and only the second
	 * result is used — presumably to flush a stale value; confirm
	 * against the PHY errata before simplifying. */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a new negotiation round. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8227
8228 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8229 {
8230         struct tg3 *tp = netdev_priv(dev);
8231
8232         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8233         ering->rx_mini_max_pending = 0;
8234         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8235                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8236         else
8237                 ering->rx_jumbo_max_pending = 0;
8238
8239         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8240
8241         ering->rx_pending = tp->rx_pending;
8242         ering->rx_mini_pending = 0;
8243         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8244                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8245         else
8246                 ering->rx_jumbo_pending = 0;
8247
8248         ering->tx_pending = tp->tx_pending;
8249 }
8250
/* ethtool hook: resize the RX/TX rings.  The TX ring must hold more than
 * MAX_SKB_FRAGS descriptors (3x that on TSO_BUG parts, which linearize
 * or split large sends).  If the device is running it is halted and fully
 * reinitialized with the new sizes.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce NAPI/TX before reconfiguring the rings. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some parts can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Full chip reset so the new ring sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8290
8291 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8292 {
8293         struct tg3 *tp = netdev_priv(dev);
8294
8295         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8296         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8297         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8298 }
8299
8300 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8301 {
8302         struct tg3 *tp = netdev_priv(dev);
8303         int irq_sync = 0, err = 0;
8304
8305         if (netif_running(dev)) {
8306                 tg3_netif_stop(tp);
8307                 irq_sync = 1;
8308         }
8309
8310         tg3_full_lock(tp, irq_sync);
8311
8312         if (epause->autoneg)
8313                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8314         else
8315                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8316         if (epause->rx_pause)
8317                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8318         else
8319                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8320         if (epause->tx_pause)
8321                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8322         else
8323                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8324
8325         if (netif_running(dev)) {
8326                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8327                 err = tg3_restart_hw(tp, 1);
8328                 if (!err)
8329                         tg3_netif_start(tp);
8330         }
8331
8332         tg3_full_unlock(tp);
8333
8334         return err;
8335 }
8336
8337 static u32 tg3_get_rx_csum(struct net_device *dev)
8338 {
8339         struct tg3 *tp = netdev_priv(dev);
8340         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8341 }
8342
8343 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8344 {
8345         struct tg3 *tp = netdev_priv(dev);
8346
8347         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8348                 if (data != 0)
8349                         return -EINVAL;
8350                 return 0;
8351         }
8352
8353         spin_lock_bh(&tp->lock);
8354         if (data)
8355                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8356         else
8357                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8358         spin_unlock_bh(&tp->lock);
8359
8360         return 0;
8361 }
8362
8363 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8364 {
8365         struct tg3 *tp = netdev_priv(dev);
8366
8367         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8368                 if (data != 0)
8369                         return -EINVAL;
8370                 return 0;
8371         }
8372
8373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8374             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
8376                 ethtool_op_set_tx_ipv6_csum(dev, data);
8377         else
8378                 ethtool_op_set_tx_csum(dev, data);
8379
8380         return 0;
8381 }
8382
8383 static int tg3_get_sset_count (struct net_device *dev, int sset)
8384 {
8385         switch (sset) {
8386         case ETH_SS_TEST:
8387                 return TG3_NUM_TEST;
8388         case ETH_SS_STATS:
8389                 return TG3_NUM_STATS;
8390         default:
8391                 return -EOPNOTSUPP;
8392         }
8393 }
8394
8395 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8396 {
8397         switch (stringset) {
8398         case ETH_SS_STATS:
8399                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8400                 break;
8401         case ETH_SS_TEST:
8402                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8403                 break;
8404         default:
8405                 WARN_ON(1);     /* we need a WARN() */
8406                 break;
8407         }
8408 }
8409
/* ethtool hook: blink the port LEDs so the adapter can be located.
 * 'data' is the duration in seconds (0 selects a 2-second default); each
 * 500 ms half-cycle alternates all-LEDs-on with all-off, and the normal
 * LED control value is restored before returning.
 * Returns 0, or -EAGAIN if the interface is down.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* data seconds total: data * 2 half-cycles of 500 ms each. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			/* Force every LED on and blinking. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			/* Force every LED off. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore normal LED operation. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8441
8442 static void tg3_get_ethtool_stats (struct net_device *dev,
8443                                    struct ethtool_stats *estats, u64 *tmp_stats)
8444 {
8445         struct tg3 *tp = netdev_priv(dev);
8446         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8447 }
8448
8449 #define NVRAM_TEST_SIZE 0x100
8450 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8451 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8452 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8453
/* Self-test: verify the NVRAM contents' integrity.
 * The magic word at offset 0 selects the image format:
 *   - TG3_EEPROM_MAGIC: legacy format, checked with two CRCs below;
 *   - *_MAGIC_FW (format 1 only): selfboot firmware, simple byte checksum;
 *   - *_MAGIC_HW: selfboot hardware format, per-byte parity bits.
 * Returns 0 if the image checks out, -EIO/-ENOMEM on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much of NVRAM must be read for this format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* other selfboot FW formats: no test */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image word-by-word into buf (little-endian byte order). */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format: whole image must byte-sum to zero. */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Selfboot HW format: data bytes carry parity bits packed into
	 * dedicated bytes at offsets 0, 8 and 16. */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits per parity byte here. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* 6 bits from this byte... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ...and 8 from the next. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have odd weight. */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8566
8567 #define TG3_SERDES_TIMEOUT_SEC  2
8568 #define TG3_COPPER_TIMEOUT_SEC  6
8569
8570 static int tg3_test_link(struct tg3 *tp)
8571 {
8572         int i, max;
8573
8574         if (!netif_running(tp->dev))
8575                 return -ENODEV;
8576
8577         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8578                 max = TG3_SERDES_TIMEOUT_SEC;
8579         else
8580                 max = TG3_COPPER_TIMEOUT_SEC;
8581
8582         for (i = 0; i < max; i++) {
8583                 if (netif_carrier_ok(tp->dev))
8584                         return 0;
8585
8586                 if (msleep_interruptible(1000))
8587                         break;
8588         }
8589
8590         return -EIO;
8591 }
8592
8593 /* Only test the commonly used registers */
8594 static int tg3_test_registers(struct tg3 *tp)
8595 {
8596         int i, is_5705, is_5750;
8597         u32 offset, read_mask, write_mask, val, save_val, read_val;
8598         static struct {
8599                 u16 offset;
8600                 u16 flags;
8601 #define TG3_FL_5705     0x1
8602 #define TG3_FL_NOT_5705 0x2
8603 #define TG3_FL_NOT_5788 0x4
8604 #define TG3_FL_NOT_5750 0x8
8605                 u32 read_mask;
8606                 u32 write_mask;
8607         } reg_tbl[] = {
8608                 /* MAC Control Registers */
8609                 { MAC_MODE, TG3_FL_NOT_5705,
8610                         0x00000000, 0x00ef6f8c },
8611                 { MAC_MODE, TG3_FL_5705,
8612                         0x00000000, 0x01ef6b8c },
8613                 { MAC_STATUS, TG3_FL_NOT_5705,
8614                         0x03800107, 0x00000000 },
8615                 { MAC_STATUS, TG3_FL_5705,
8616                         0x03800100, 0x00000000 },
8617                 { MAC_ADDR_0_HIGH, 0x0000,
8618                         0x00000000, 0x0000ffff },
8619                 { MAC_ADDR_0_LOW, 0x0000,
8620                         0x00000000, 0xffffffff },
8621                 { MAC_RX_MTU_SIZE, 0x0000,
8622                         0x00000000, 0x0000ffff },
8623                 { MAC_TX_MODE, 0x0000,
8624                         0x00000000, 0x00000070 },
8625                 { MAC_TX_LENGTHS, 0x0000,
8626                         0x00000000, 0x00003fff },
8627                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8628                         0x00000000, 0x000007fc },
8629                 { MAC_RX_MODE, TG3_FL_5705,
8630                         0x00000000, 0x000007dc },
8631                 { MAC_HASH_REG_0, 0x0000,
8632                         0x00000000, 0xffffffff },
8633                 { MAC_HASH_REG_1, 0x0000,
8634                         0x00000000, 0xffffffff },
8635                 { MAC_HASH_REG_2, 0x0000,
8636                         0x00000000, 0xffffffff },
8637                 { MAC_HASH_REG_3, 0x0000,
8638                         0x00000000, 0xffffffff },
8639
8640                 /* Receive Data and Receive BD Initiator Control Registers. */
8641                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8642                         0x00000000, 0xffffffff },
8643                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8644                         0x00000000, 0xffffffff },
8645                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8646                         0x00000000, 0x00000003 },
8647                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8648                         0x00000000, 0xffffffff },
8649                 { RCVDBDI_STD_BD+0, 0x0000,
8650                         0x00000000, 0xffffffff },
8651                 { RCVDBDI_STD_BD+4, 0x0000,
8652                         0x00000000, 0xffffffff },
8653                 { RCVDBDI_STD_BD+8, 0x0000,
8654                         0x00000000, 0xffff0002 },
8655                 { RCVDBDI_STD_BD+0xc, 0x0000,
8656                         0x00000000, 0xffffffff },
8657
8658                 /* Receive BD Initiator Control Registers. */
8659                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8660                         0x00000000, 0xffffffff },
8661                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8662                         0x00000000, 0x000003ff },
8663                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8664                         0x00000000, 0xffffffff },
8665
8666                 /* Host Coalescing Control Registers. */
8667                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8668                         0x00000000, 0x00000004 },
8669                 { HOSTCC_MODE, TG3_FL_5705,
8670                         0x00000000, 0x000000f6 },
8671                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8672                         0x00000000, 0xffffffff },
8673                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8674                         0x00000000, 0x000003ff },
8675                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8676                         0x00000000, 0xffffffff },
8677                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8678                         0x00000000, 0x000003ff },
8679                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8680                         0x00000000, 0xffffffff },
8681                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8682                         0x00000000, 0x000000ff },
8683                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8684                         0x00000000, 0xffffffff },
8685                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8686                         0x00000000, 0x000000ff },
8687                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8688                         0x00000000, 0xffffffff },
8689                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8690                         0x00000000, 0xffffffff },
8691                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8692                         0x00000000, 0xffffffff },
8693                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8694                         0x00000000, 0x000000ff },
8695                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8696                         0x00000000, 0xffffffff },
8697                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8698                         0x00000000, 0x000000ff },
8699                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8700                         0x00000000, 0xffffffff },
8701                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8702                         0x00000000, 0xffffffff },
8703                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8704                         0x00000000, 0xffffffff },
8705                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8706                         0x00000000, 0xffffffff },
8707                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8708                         0x00000000, 0xffffffff },
8709                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8710                         0xffffffff, 0x00000000 },
8711                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8712                         0xffffffff, 0x00000000 },
8713
8714                 /* Buffer Manager Control Registers. */
8715                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8716                         0x00000000, 0x007fff80 },
8717                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8718                         0x00000000, 0x007fffff },
8719                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8720                         0x00000000, 0x0000003f },
8721                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8722                         0x00000000, 0x000001ff },
8723                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8724                         0x00000000, 0x000001ff },
8725                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8726                         0xffffffff, 0x00000000 },
8727                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8728                         0xffffffff, 0x00000000 },
8729
8730                 /* Mailbox Registers */
8731                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8732                         0x00000000, 0x000001ff },
8733                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8734                         0x00000000, 0x000001ff },
8735                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8736                         0x00000000, 0x000007ff },
8737                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8738                         0x00000000, 0x000001ff },
8739
8740                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8741         };
8742
8743         is_5705 = is_5750 = 0;
8744         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8745                 is_5705 = 1;
8746                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8747                         is_5750 = 1;
8748         }
8749
8750         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8751                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8752                         continue;
8753
8754                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8755                         continue;
8756
8757                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8758                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8759                         continue;
8760
8761                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8762                         continue;
8763
8764                 offset = (u32) reg_tbl[i].offset;
8765                 read_mask = reg_tbl[i].read_mask;
8766                 write_mask = reg_tbl[i].write_mask;
8767
8768                 /* Save the original register content */
8769                 save_val = tr32(offset);
8770
8771                 /* Determine the read-only value. */
8772                 read_val = save_val & read_mask;
8773
8774                 /* Write zero to the register, then make sure the read-only bits
8775                  * are not changed and the read/write bits are all zeros.
8776                  */
8777                 tw32(offset, 0);
8778
8779                 val = tr32(offset);
8780
8781                 /* Test the read-only and read/write bits. */
8782                 if (((val & read_mask) != read_val) || (val & write_mask))
8783                         goto out;
8784
8785                 /* Write ones to all the bits defined by RdMask and WrMask, then
8786                  * make sure the read-only bits are not changed and the
8787                  * read/write bits are all ones.
8788                  */
8789                 tw32(offset, read_mask | write_mask);
8790
8791                 val = tr32(offset);
8792
8793                 /* Test the read-only bits. */
8794                 if ((val & read_mask) != read_val)
8795                         goto out;
8796
8797                 /* Test the read/write bits. */
8798                 if ((val & write_mask) != write_mask)
8799                         goto out;
8800
8801                 tw32(offset, save_val);
8802         }
8803
8804         return 0;
8805
8806 out:
8807         if (netif_msg_hw(tp))
8808                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
8809                        offset);
8810         tw32(offset, save_val);
8811         return -EIO;
8812 }
8813
8814 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8815 {
8816         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8817         int i;
8818         u32 j;
8819
8820         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8821                 for (j = 0; j < len; j += 4) {
8822                         u32 val;
8823
8824                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8825                         tg3_read_mem(tp, offset + j, &val);
8826                         if (val != test_pattern[i])
8827                                 return -EIO;
8828                 }
8829         }
8830         return 0;
8831 }
8832
8833 static int tg3_test_memory(struct tg3 *tp)
8834 {
8835         static struct mem_entry {
8836                 u32 offset;
8837                 u32 len;
8838         } mem_tbl_570x[] = {
8839                 { 0x00000000, 0x00b50},
8840                 { 0x00002000, 0x1c000},
8841                 { 0xffffffff, 0x00000}
8842         }, mem_tbl_5705[] = {
8843                 { 0x00000100, 0x0000c},
8844                 { 0x00000200, 0x00008},
8845                 { 0x00004000, 0x00800},
8846                 { 0x00006000, 0x01000},
8847                 { 0x00008000, 0x02000},
8848                 { 0x00010000, 0x0e000},
8849                 { 0xffffffff, 0x00000}
8850         }, mem_tbl_5755[] = {
8851                 { 0x00000200, 0x00008},
8852                 { 0x00004000, 0x00800},
8853                 { 0x00006000, 0x00800},
8854                 { 0x00008000, 0x02000},
8855                 { 0x00010000, 0x0c000},
8856                 { 0xffffffff, 0x00000}
8857         }, mem_tbl_5906[] = {
8858                 { 0x00000200, 0x00008},
8859                 { 0x00004000, 0x00400},
8860                 { 0x00006000, 0x00400},
8861                 { 0x00008000, 0x01000},
8862                 { 0x00010000, 0x01000},
8863                 { 0xffffffff, 0x00000}
8864         };
8865         struct mem_entry *mem_tbl;
8866         int err = 0;
8867         int i;
8868
8869         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8870                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8871                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8872                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
8873                         mem_tbl = mem_tbl_5755;
8874                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8875                         mem_tbl = mem_tbl_5906;
8876                 else
8877                         mem_tbl = mem_tbl_5705;
8878         } else
8879                 mem_tbl = mem_tbl_570x;
8880
8881         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8882                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8883                     mem_tbl[i].len)) != 0)
8884                         break;
8885         }
8886
8887         return err;
8888 }
8889
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Transmit one self-addressed 1514-byte test frame with the chip in
 * MAC-internal or PHY loopback mode and verify it is received intact.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK.
 *
 * Returns 0 on success (including the 5780 MAC-loopback skip below),
 * -EINVAL for an unknown mode, -ENOMEM if the skb allocation fails,
 * and -EIO if the frame is not transmitted or received correctly.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop frames back inside the MAC, MII or GMII port mode
		 * depending on board capability.
		 */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* Open the ephy shadow-register window and
				 * clear bit 5 of shadow reg 0x1b (exact
				 * meaning per Broadcom - not documented
				 * here; TODO confirm), then close the
				 * window again.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			/* 5906 is a 10/100 part: loop back at 100 Mbps. */
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link-polarity quirks depend on the attached
			 * PHY model.
			 */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: destination MAC = our own address,
	 * 8 zero bytes, then an incrementing byte pattern we can
	 * verify on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	/* Allow the frame plus FCS through the receiver. */
	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember the rx producer index so we can tell when our one
	 * packet arrives.
	 */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the send ring and flush the mailbox write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame never left, or never came back. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: must come from the standard
	 * ring, carry no error (an odd-nibble MII error alone is
	 * tolerated), and match the transmitted length minus FCS.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9060
9061 #define TG3_MAC_LOOPBACK_FAILED         1
9062 #define TG3_PHY_LOOPBACK_FAILED         2
9063 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9064                                          TG3_PHY_LOOPBACK_FAILED)
9065
9066 static int tg3_test_loopback(struct tg3 *tp)
9067 {
9068         int err = 0;
9069
9070         if (!netif_running(tp->dev))
9071                 return TG3_LOOPBACK_FAILED;
9072
9073         err = tg3_reset_hw(tp, 1);
9074         if (err)
9075                 return TG3_LOOPBACK_FAILED;
9076
9077         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9078                 err |= TG3_MAC_LOOPBACK_FAILED;
9079         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9080                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9081                         err |= TG3_PHY_LOOPBACK_FAILED;
9082         }
9083
9084         return err;
9085 }
9086
/* ethtool self-test handler (ETHTOOL_TEST).
 *
 * Fills @data[0..5] with per-test failure flags (non-zero = failed):
 *   [0] NVRAM, [1] link, [2] registers, [3] memory,
 *   [4] loopback (bitmask from tg3_test_loopback()), [5] interrupt.
 * The offline tests ([2]..[5]) run only when the caller sets
 * ETH_TEST_FL_OFFLINE; they halt the chip and restart it afterwards.
 * ETH_TEST_FL_FAILED is set in @etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Bring the chip to full power for the duration of the tests. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the interface before taking the full lock. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its CPUs (TX CPU only exists on
		 * pre-5705 parts), holding the NVRAM lock across the
		 * halt when it can be acquired.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* tg3_test_loopback() returns a failure bitmask which is
		 * reported directly in data[4].
		 */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs with the full lock dropped. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Shut down, then bring the chip back up if the
		 * interface was running before the tests.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Restore the low-power state we found the chip in. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9159
/* Net-device ioctl handler: implements the standard MII register
 * ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG) via the tg3 PHY
 * helpers, serialized on tp->lock.
 *
 * Returns 0 or the tg3_readphy()/tg3_writephy() error; -EAGAIN while
 * the PHY is powered down; -EPERM for a write without CAP_NET_ADMIN;
 * -EOPNOTSUPP for unknown commands and for MII access on SERDES
 * boards, which have no PHY.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9211
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * receiver's VLAN-tag handling, quiescing the interface around the
 * update when it is up.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);
	int restart = netif_running(dev);

	if (restart)
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (restart)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9233
9234 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9235 {
9236         struct tg3 *tp = netdev_priv(dev);
9237
9238         memcpy(ec, &tp->coal, sizeof(*ec));
9239         return 0;
9240 }
9241
9242 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9243 {
9244         struct tg3 *tp = netdev_priv(dev);
9245         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9246         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9247
9248         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9249                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9250                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9251                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9252                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9253         }
9254
9255         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9256             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9257             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9258             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9259             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9260             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9261             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9262             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9263             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9264             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9265                 return -EINVAL;
9266
9267         /* No rx interrupts will be generated if both are zero */
9268         if ((ec->rx_coalesce_usecs == 0) &&
9269             (ec->rx_max_coalesced_frames == 0))
9270                 return -EINVAL;
9271
9272         /* No tx interrupts will be generated if both are zero */
9273         if ((ec->tx_coalesce_usecs == 0) &&
9274             (ec->tx_max_coalesced_frames == 0))
9275                 return -EINVAL;
9276
9277         /* Only copy relevant parameters, ignore all others. */
9278         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9279         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9280         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9281         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9282         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9283         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9284         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9285         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9286         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9287
9288         if (netif_running(dev)) {
9289                 tg3_full_lock(tp, 0);
9290                 __tg3_set_coalesce(tp, &tp->coal);
9291                 tg3_full_unlock(tp);
9292         }
9293         return 0;
9294 }
9295
/* ethtool operations table for tg3 devices; each entry points at the
 * corresponding tg3_* handler defined in this file.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9328
9329 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9330 {
9331         u32 cursize, val, magic;
9332
9333         tp->nvram_size = EEPROM_CHIP_SIZE;
9334
9335         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9336                 return;
9337
9338         if ((magic != TG3_EEPROM_MAGIC) &&
9339             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9340             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9341                 return;
9342
9343         /*
9344          * Size the chip by reading offsets at increasing powers of two.
9345          * When we encounter our validation signature, we know the addressing
9346          * has wrapped around, and thus have our chip size.
9347          */
9348         cursize = 0x10;
9349
9350         while (cursize < tp->nvram_size) {
9351                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9352                         return;
9353
9354                 if (val == magic)
9355                         break;
9356
9357                 cursize <<= 1;
9358         }
9359
9360         tp->nvram_size = cursize;
9361 }
9362
9363 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9364 {
9365         u32 val;
9366
9367         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9368                 return;
9369
9370         /* Selfboot format */
9371         if (val != TG3_EEPROM_MAGIC) {
9372                 tg3_get_eeprom_size(tp);
9373                 return;
9374         }
9375
9376         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9377                 if (val != 0) {
9378                         tp->nvram_size = (val >> 16) * 1024;
9379                         return;
9380                 }
9381         }
9382         tp->nvram_size = 0x80000;
9383 }
9384
/* Decode NVRAM_CFG1 to identify the attached NVRAM part: sets the
 * JEDEC vendor id, page size, and the TG3_FLG2_FLASH /
 * TG3_FLAG_NVRAM_BUFFERED flags in tp.  The per-vendor decode applies
 * to 5750 and 5780-class chips; all others default to a buffered
 * Atmel AT45DB0X1B part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear the compatibility-bypass bit
		 * and write the config back.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Default for all other chips. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9437
/* 5752 variant of the NVRAM probe: decode NVRAM_CFG1 into the vendor
 * id, buffered/flash flags, TPM protection flag, and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts encode their page size in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9498
9499 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9500 {
9501         u32 nvcfg1, protect = 0;
9502
9503         nvcfg1 = tr32(NVRAM_CFG1);
9504
9505         /* NVRAM protection for TPM */
9506         if (nvcfg1 & (1 << 27)) {
9507                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9508                 protect = 1;
9509         }
9510
9511         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9512         switch (nvcfg1) {
9513                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9514                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9515                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9516                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9517                         tp->nvram_jedecnum = JEDEC_ATMEL;
9518                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9519                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9520                         tp->nvram_pagesize = 264;
9521                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9522                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9523                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9524                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9525                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9526                         else
9527                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9528                         break;
9529                 case FLASH_5752VENDOR_ST_M45PE10:
9530                 case FLASH_5752VENDOR_ST_M45PE20:
9531                 case FLASH_5752VENDOR_ST_M45PE40:
9532                         tp->nvram_jedecnum = JEDEC_ST;
9533                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9534                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9535                         tp->nvram_pagesize = 256;
9536                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9537                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9538                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9539                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9540                         else
9541                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9542                         break;
9543         }
9544 }
9545
9546 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9547 {
9548         u32 nvcfg1;
9549
9550         nvcfg1 = tr32(NVRAM_CFG1);
9551
9552         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9553                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9554                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9555                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9556                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9557                         tp->nvram_jedecnum = JEDEC_ATMEL;
9558                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9559                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9560
9561                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9562                         tw32(NVRAM_CFG1, nvcfg1);
9563                         break;
9564                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9565                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9566                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9567                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9568                         tp->nvram_jedecnum = JEDEC_ATMEL;
9569                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9570                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9571                         tp->nvram_pagesize = 264;
9572                         break;
9573                 case FLASH_5752VENDOR_ST_M45PE10:
9574                 case FLASH_5752VENDOR_ST_M45PE20:
9575                 case FLASH_5752VENDOR_ST_M45PE40:
9576                         tp->nvram_jedecnum = JEDEC_ST;
9577                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9578                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9579                         tp->nvram_pagesize = 256;
9580                         break;
9581         }
9582 }
9583
9584 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9585 {
9586         tp->nvram_jedecnum = JEDEC_ATMEL;
9587         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9588         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9589 }
9590
9591 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9592 static void __devinit tg3_nvram_init(struct tg3 *tp)
9593 {
9594         tw32_f(GRC_EEPROM_ADDR,
9595              (EEPROM_ADDR_FSM_RESET |
9596               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9597                EEPROM_ADDR_CLKPERD_SHIFT)));
9598
9599         msleep(1);
9600
9601         /* Enable seeprom accesses. */
9602         tw32_f(GRC_LOCAL_CTRL,
9603              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9604         udelay(100);
9605
9606         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9607             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9608                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9609
9610                 if (tg3_nvram_lock(tp)) {
9611                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9612                                "tg3_nvram_init failed.\n", tp->dev->name);
9613                         return;
9614                 }
9615                 tg3_enable_nvram_access(tp);
9616
9617                 tp->nvram_size = 0;
9618
9619                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9620                         tg3_get_5752_nvram_info(tp);
9621                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9622                         tg3_get_5755_nvram_info(tp);
9623                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9624                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9625                         tg3_get_5787_nvram_info(tp);
9626                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9627                         tg3_get_5906_nvram_info(tp);
9628                 else
9629                         tg3_get_nvram_info(tp);
9630
9631                 if (tp->nvram_size == 0)
9632                         tg3_get_nvram_size(tp);
9633
9634                 tg3_disable_nvram_access(tp);
9635                 tg3_nvram_unlock(tp);
9636
9637         } else {
9638                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9639
9640                 tg3_get_eeprom_size(tp);
9641         }
9642 }
9643
9644 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9645                                         u32 offset, u32 *val)
9646 {
9647         u32 tmp;
9648         int i;
9649
9650         if (offset > EEPROM_ADDR_ADDR_MASK ||
9651             (offset % 4) != 0)
9652                 return -EINVAL;
9653
9654         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9655                                         EEPROM_ADDR_DEVID_MASK |
9656                                         EEPROM_ADDR_READ);
9657         tw32(GRC_EEPROM_ADDR,
9658              tmp |
9659              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9660              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9661               EEPROM_ADDR_ADDR_MASK) |
9662              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9663
9664         for (i = 0; i < 1000; i++) {
9665                 tmp = tr32(GRC_EEPROM_ADDR);
9666
9667                 if (tmp & EEPROM_ADDR_COMPLETE)
9668                         break;
9669                 msleep(1);
9670         }
9671         if (!(tmp & EEPROM_ADDR_COMPLETE))
9672                 return -EBUSY;
9673
9674         *val = tr32(GRC_EEPROM_DATA);
9675         return 0;
9676 }
9677
9678 #define NVRAM_CMD_TIMEOUT 10000
9679
9680 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9681 {
9682         int i;
9683
9684         tw32(NVRAM_CMD, nvram_cmd);
9685         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9686                 udelay(10);
9687                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9688                         udelay(10);
9689                         break;
9690                 }
9691         }
9692         if (i == NVRAM_CMD_TIMEOUT) {
9693                 return -EBUSY;
9694         }
9695         return 0;
9696 }
9697
9698 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9699 {
9700         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9701             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9702             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9703             (tp->nvram_jedecnum == JEDEC_ATMEL))
9704
9705                 addr = ((addr / tp->nvram_pagesize) <<
9706                         ATMEL_AT45DB0X1B_PAGE_POS) +
9707                        (addr % tp->nvram_pagesize);
9708
9709         return addr;
9710 }
9711
9712 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9713 {
9714         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9715             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9716             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9717             (tp->nvram_jedecnum == JEDEC_ATMEL))
9718
9719                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9720                         tp->nvram_pagesize) +
9721                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9722
9723         return addr;
9724 }
9725
9726 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9727 {
9728         int ret;
9729
9730         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9731                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9732
9733         offset = tg3_nvram_phys_addr(tp, offset);
9734
9735         if (offset > NVRAM_ADDR_MSK)
9736                 return -EINVAL;
9737
9738         ret = tg3_nvram_lock(tp);
9739         if (ret)
9740                 return ret;
9741
9742         tg3_enable_nvram_access(tp);
9743
9744         tw32(NVRAM_ADDR, offset);
9745         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9746                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9747
9748         if (ret == 0)
9749                 *val = swab32(tr32(NVRAM_RDDATA));
9750
9751         tg3_disable_nvram_access(tp);
9752
9753         tg3_nvram_unlock(tp);
9754
9755         return ret;
9756 }
9757
9758 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9759 {
9760         int err;
9761         u32 tmp;
9762
9763         err = tg3_nvram_read(tp, offset, &tmp);
9764         *val = swab32(tmp);
9765         return err;
9766 }
9767
9768 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9769                                     u32 offset, u32 len, u8 *buf)
9770 {
9771         int i, j, rc = 0;
9772         u32 val;
9773
9774         for (i = 0; i < len; i += 4) {
9775                 u32 addr, data;
9776
9777                 addr = offset + i;
9778
9779                 memcpy(&data, buf + i, 4);
9780
9781                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9782
9783                 val = tr32(GRC_EEPROM_ADDR);
9784                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9785
9786                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9787                         EEPROM_ADDR_READ);
9788                 tw32(GRC_EEPROM_ADDR, val |
9789                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9790                         (addr & EEPROM_ADDR_ADDR_MASK) |
9791                         EEPROM_ADDR_START |
9792                         EEPROM_ADDR_WRITE);
9793
9794                 for (j = 0; j < 1000; j++) {
9795                         val = tr32(GRC_EEPROM_ADDR);
9796
9797                         if (val & EEPROM_ADDR_COMPLETE)
9798                                 break;
9799                         msleep(1);
9800                 }
9801                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9802                         rc = -EBUSY;
9803                         break;
9804                 }
9805         }
9806
9807         return rc;
9808 }
9809
/* Write to unbuffered flash; offset and length are dword aligned.
 *
 * Unbuffered parts need a full page read-modify-write cycle: read the
 * page containing @offset into a bounce buffer, merge the new bytes,
 * erase the page, then rewrite it word by word.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;	/* valid only for power-of-two pages */
	u32 nvram_cmd;
	u8 *tmp;	/* bounce buffer holding one full page image */

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole target page into the bounce buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into the page image.
		 * NOTE(review): buf is never advanced between loop
		 * iterations, so a write spanning multiple pages would
		 * re-copy the start of buf — confirm callers never
		 * cross a page boundary with len > one page.
		 */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Rewrite the merged page one word at a time, marking
		 * the first and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part with writes disabled again, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9905
/* Write to buffered flash/EEPROM; offset and length are dword aligned.
 *
 * Buffered parts take word-at-a-time writes with no explicit erase;
 * the controller needs NVRAM_CMD_FIRST/NVRAM_CMD_LAST markers at page
 * and transfer boundaries.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate the flat offset to the device's paged layout. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark page starts/ends and the end of the whole transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* On ST flash with ASICs other than 5752/5755/5784/5787,
		 * issue an explicit write-enable before the first word of
		 * each page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9957
/* Write a block to NVRAM/EEPROM; offset and length are dword aligned.
 *
 * Temporarily drops the EEPROM write-protect GPIO (if armed), routes
 * the write to either the legacy serial-EEPROM path or the buffered/
 * unbuffered NVRAM path, then restores write protection.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Deassert write protect for the duration of the update. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		/* NOTE(review): this early return skips the write-protect
		 * re-assert at the bottom of the function — confirm whether
		 * leaving WP dropped on lock failure is intentional.
		 */
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes at the GRC level for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protect. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
10012
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY ID
 * expected on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10017
/* Known boards keyed by PCI subsystem IDs, used as a fallback source
 * for the PHY ID when it cannot be read from the hardware.
 * NOTE(review): entries with a phy_id of 0 appear to be non-copper
 * (fiber/serdes) boards — confirm before relying on that.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10055
10056 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10057 {
10058         int i;
10059
10060         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10061                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10062                      tp->pdev->subsystem_vendor) &&
10063                     (subsys_id_to_phy_id[i].subsys_devid ==
10064                      tp->pdev->subsystem_device))
10065                         return &subsys_id_to_phy_id[i];
10066         }
10067         return NULL;
10068 }
10069
10070 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10071 {
10072         u32 val;
10073         u16 pmcsr;
10074
10075         /* On some early chips the SRAM cannot be accessed in D3hot state,
10076          * so need make sure we're in D0.
10077          */
10078         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10079         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10080         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10081         msleep(1);
10082
10083         /* Make sure register accesses (indirect or otherwise)
10084          * will function correctly.
10085          */
10086         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10087                                tp->misc_host_ctrl);
10088
10089         /* The memory arbiter has to be enabled in order for SRAM accesses
10090          * to succeed.  Normally on powerup the tg3 chip firmware will make
10091          * sure it is enabled, but other entities such as system netboot
10092          * code might disable it.
10093          */
10094         val = tr32(MEMARB_MODE);
10095         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10096
10097         tp->phy_id = PHY_ID_INVALID;
10098         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10099
10100         /* Assume an onboard device and WOL capable by default.  */
10101         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10102
10103         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10104                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10105                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10106                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10107                 }
10108                 if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
10109                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10110                 return;
10111         }
10112
10113         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10114         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10115                 u32 nic_cfg, led_cfg;
10116                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10117                 int eeprom_phy_serdes = 0;
10118
10119                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10120                 tp->nic_sram_data_cfg = nic_cfg;
10121
10122                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10123                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10124                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10125                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10126                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10127                     (ver > 0) && (ver < 0x100))
10128                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10129
10130                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10131                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10132                         eeprom_phy_serdes = 1;
10133
10134                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10135                 if (nic_phy_id != 0) {
10136                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10137                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10138
10139                         eeprom_phy_id  = (id1 >> 16) << 10;
10140                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10141                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10142                 } else
10143                         eeprom_phy_id = 0;
10144
10145                 tp->phy_id = eeprom_phy_id;
10146                 if (eeprom_phy_serdes) {
10147                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10148                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10149                         else
10150                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10151                 }
10152
10153                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10154                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10155                                     SHASTA_EXT_LED_MODE_MASK);
10156                 else
10157                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10158
10159                 switch (led_cfg) {
10160                 default:
10161                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10162                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10163                         break;
10164
10165                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10166                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10167                         break;
10168
10169                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10170                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10171
10172                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10173                          * read on some older 5700/5701 bootcode.
10174                          */
10175                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10176                             ASIC_REV_5700 ||
10177                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10178                             ASIC_REV_5701)
10179                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10180
10181                         break;
10182
10183                 case SHASTA_EXT_LED_SHARED:
10184                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10185                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10186                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10187                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10188                                                  LED_CTRL_MODE_PHY_2);
10189                         break;
10190
10191                 case SHASTA_EXT_LED_MAC:
10192                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10193                         break;
10194
10195                 case SHASTA_EXT_LED_COMBO:
10196                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10197                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10198                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10199                                                  LED_CTRL_MODE_PHY_2);
10200                         break;
10201
10202                 };
10203
10204                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10205                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10206                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10207                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10208
10209                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10210                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10211                         if ((tp->pdev->subsystem_vendor ==
10212                              PCI_VENDOR_ID_ARIMA) &&
10213                             (tp->pdev->subsystem_device == 0x205a ||
10214                              tp->pdev->subsystem_device == 0x2063))
10215                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10216                 } else {
10217                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10218                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10219                 }
10220
10221                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10222                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10223                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10224                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10225                 }
10226                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10227                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10228                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10229
10230                 if (cfg2 & (1 << 17))
10231                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10232
10233                 /* serdes signal pre-emphasis in register 0x590 set by */
10234                 /* bootcode if bit 18 is set */
10235                 if (cfg2 & (1 << 18))
10236                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10237
10238                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10239                         u32 cfg3;
10240
10241                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10242                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10243                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10244                 }
10245         }
10246 }
10247
/* Detect the PHY attached to the chip, record its ID in tp->phy_id and
 * set/clear the SERDES flags accordingly.  For copper PHYs that are not
 * managed by ASF firmware, also reset the PHY and (re)start autoneg with
 * the full advertisement mask if it is not already advertising everything.
 *
 * Returns 0 on success or a negative errno (-ENODEV if no PHY could be
 * identified at all).
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		/* Leave the PHY to the firmware; mark the ID as unknown. */
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Assemble tg3's internal 32-bit PHY ID from the two
		 * standard MII ID registers (OUI + model + revision).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		/* Mask off the revision bits for table lookups. */
		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is the only known serdes PHY in this table. */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero phy_id in the table also means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY under driver (not ASF) control: make sure it is
	 * advertising everything it can, resetting it first if the link
	 * is currently down.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched-low; read it twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			/* Rewrite the advertisement registers and restart
			 * autonegotiation.
			 */
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): these writes repeat the ones above even
		 * when advertising was already complete — presumably to
		 * refresh the registers after the wirespeed tweak; confirm.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): for BCM5401 this runs the DSP init a second time;
	 * it looks redundant but may be a deliberate hardware workaround —
	 * TODO confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes links only do gigabit fibre; 10/100-only parts must not
	 * advertise gigabit.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10373
10374 static void __devinit tg3_read_partno(struct tg3 *tp)
10375 {
10376         unsigned char vpd_data[256];
10377         unsigned int i;
10378         u32 magic;
10379
10380         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10381                 goto out_not_found;
10382
10383         if (magic == TG3_EEPROM_MAGIC) {
10384                 for (i = 0; i < 256; i += 4) {
10385                         u32 tmp;
10386
10387                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10388                                 goto out_not_found;
10389
10390                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10391                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10392                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10393                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10394                 }
10395         } else {
10396                 int vpd_cap;
10397
10398                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10399                 for (i = 0; i < 256; i += 4) {
10400                         u32 tmp, j = 0;
10401                         u16 tmp16;
10402
10403                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10404                                               i);
10405                         while (j++ < 100) {
10406                                 pci_read_config_word(tp->pdev, vpd_cap +
10407                                                      PCI_VPD_ADDR, &tmp16);
10408                                 if (tmp16 & 0x8000)
10409                                         break;
10410                                 msleep(1);
10411                         }
10412                         if (!(tmp16 & 0x8000))
10413                                 goto out_not_found;
10414
10415                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10416                                               &tmp);
10417                         tmp = cpu_to_le32(tmp);
10418                         memcpy(&vpd_data[i], &tmp, 4);
10419                 }
10420         }
10421
10422         /* Now parse and find the part number. */
10423         for (i = 0; i < 254; ) {
10424                 unsigned char val = vpd_data[i];
10425                 unsigned int block_end;
10426
10427                 if (val == 0x82 || val == 0x91) {
10428                         i = (i + 3 +
10429                              (vpd_data[i + 1] +
10430                               (vpd_data[i + 2] << 8)));
10431                         continue;
10432                 }
10433
10434                 if (val != 0x90)
10435                         goto out_not_found;
10436
10437                 block_end = (i + 3 +
10438                              (vpd_data[i + 1] +
10439                               (vpd_data[i + 2] << 8)));
10440                 i += 3;
10441
10442                 if (block_end > 256)
10443                         goto out_not_found;
10444
10445                 while (i < (block_end - 2)) {
10446                         if (vpd_data[i + 0] == 'P' &&
10447                             vpd_data[i + 1] == 'N') {
10448                                 int partno_len = vpd_data[i + 2];
10449
10450                                 i += 3;
10451                                 if (partno_len > 24 || (partno_len + i) > 256)
10452                                         goto out_not_found;
10453
10454                                 memcpy(tp->board_part_number,
10455                                        &vpd_data[i], partno_len);
10456
10457                                 /* Success. */
10458                                 return;
10459                         }
10460                         i += 3 + vpd_data[i + 2];
10461                 }
10462
10463                 /* Part number not found. */
10464                 goto out_not_found;
10465         }
10466
10467 out_not_found:
10468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10469                 strcpy(tp->board_part_number, "BCM95906");
10470         else
10471                 strcpy(tp->board_part_number, "none");
10472 }
10473
10474 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10475 {
10476         u32 val, offset, start;
10477
10478         if (tg3_nvram_read_swab(tp, 0, &val))
10479                 return;
10480
10481         if (val != TG3_EEPROM_MAGIC)
10482                 return;
10483
10484         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10485             tg3_nvram_read_swab(tp, 0x4, &start))
10486                 return;
10487
10488         offset = tg3_nvram_logical_addr(tp, offset);
10489         if (tg3_nvram_read_swab(tp, offset, &val))
10490                 return;
10491
10492         if ((val & 0xfc000000) == 0x0c000000) {
10493                 u32 ver_offset, addr;
10494                 int i;
10495
10496                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10497                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10498                         return;
10499
10500                 if (val != 0)
10501                         return;
10502
10503                 addr = offset + ver_offset - start;
10504                 for (i = 0; i < 16; i += 4) {
10505                         if (tg3_nvram_read(tp, addr + i, &val))
10506                                 return;
10507
10508                         val = cpu_to_le32(val);
10509                         memcpy(tp->fw_ver + i, &val, 4);
10510                 }
10511         }
10512 }
10513
10514 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10515
10516 static int __devinit tg3_get_invariants(struct tg3 *tp)
10517 {
10518         static struct pci_device_id write_reorder_chipsets[] = {
10519                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10520                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10521                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10522                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10523                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10524                              PCI_DEVICE_ID_VIA_8385_0) },
10525                 { },
10526         };
10527         u32 misc_ctrl_reg;
10528         u32 cacheline_sz_reg;
10529         u32 pci_state_reg, grc_misc_cfg;
10530         u32 val;
10531         u16 pci_cmd;
10532         int err, pcie_cap;
10533
10534         /* Force memory write invalidate off.  If we leave it on,
10535          * then on 5700_BX chips we have to enable a workaround.
10536          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10537          * to match the cacheline size.  The Broadcom driver have this
10538          * workaround but turns MWI off all the times so never uses
10539          * it.  This seems to suggest that the workaround is insufficient.
10540          */
10541         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10542         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10543         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10544
10545         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10546          * has the register indirect write enable bit set before
10547          * we try to access any of the MMIO registers.  It is also
10548          * critical that the PCI-X hw workaround situation is decided
10549          * before that as well.
10550          */
10551         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10552                               &misc_ctrl_reg);
10553
10554         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10555                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10557                 u32 prod_id_asic_rev;
10558
10559                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10560                                       &prod_id_asic_rev);
10561                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10562         }
10563
10564         /* Wrong chip ID in 5752 A0. This code can be removed later
10565          * as A0 is not in production.
10566          */
10567         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10568                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10569
10570         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10571          * we need to disable memory and use config. cycles
10572          * only to access all registers. The 5702/03 chips
10573          * can mistakenly decode the special cycles from the
10574          * ICH chipsets as memory write cycles, causing corruption
10575          * of register and memory space. Only certain ICH bridges
10576          * will drive special cycles with non-zero data during the
10577          * address phase which can fall within the 5703's address
10578          * range. This is not an ICH bug as the PCI spec allows
10579          * non-zero address during special cycles. However, only
10580          * these ICH bridges are known to drive non-zero addresses
10581          * during special cycles.
10582          *
10583          * Since special cycles do not cross PCI bridges, we only
10584          * enable this workaround if the 5703 is on the secondary
10585          * bus of these ICH bridges.
10586          */
10587         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10588             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10589                 static struct tg3_dev_id {
10590                         u32     vendor;
10591                         u32     device;
10592                         u32     rev;
10593                 } ich_chipsets[] = {
10594                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10595                           PCI_ANY_ID },
10596                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10597                           PCI_ANY_ID },
10598                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10599                           0xa },
10600                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10601                           PCI_ANY_ID },
10602                         { },
10603                 };
10604                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10605                 struct pci_dev *bridge = NULL;
10606
10607                 while (pci_id->vendor != 0) {
10608                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10609                                                 bridge);
10610                         if (!bridge) {
10611                                 pci_id++;
10612                                 continue;
10613                         }
10614                         if (pci_id->rev != PCI_ANY_ID) {
10615                                 if (bridge->revision > pci_id->rev)
10616                                         continue;
10617                         }
10618                         if (bridge->subordinate &&
10619                             (bridge->subordinate->number ==
10620                              tp->pdev->bus->number)) {
10621
10622                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10623                                 pci_dev_put(bridge);
10624                                 break;
10625                         }
10626                 }
10627         }
10628
10629         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10630          * DMA addresses > 40-bit. This bridge may have other additional
10631          * 57xx devices behind it in some 4-port NIC designs for example.
10632          * Any tg3 device found behind the bridge will also need the 40-bit
10633          * DMA workaround.
10634          */
10635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10637                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10638                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10639                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10640         }
10641         else {
10642                 struct pci_dev *bridge = NULL;
10643
10644                 do {
10645                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10646                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10647                                                 bridge);
10648                         if (bridge && bridge->subordinate &&
10649                             (bridge->subordinate->number <=
10650                              tp->pdev->bus->number) &&
10651                             (bridge->subordinate->subordinate >=
10652                              tp->pdev->bus->number)) {
10653                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10654                                 pci_dev_put(bridge);
10655                                 break;
10656                         }
10657                 } while (bridge);
10658         }
10659
10660         /* Initialize misc host control in PCI block. */
10661         tp->misc_host_ctrl |= (misc_ctrl_reg &
10662                                MISC_HOST_CTRL_CHIPREV);
10663         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10664                                tp->misc_host_ctrl);
10665
10666         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10667                               &cacheline_sz_reg);
10668
10669         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10670         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10671         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10672         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10673
10674         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10675             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10676                 tp->pdev_peer = tg3_find_peer(tp);
10677
10678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10684             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10685                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10686
10687         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10688             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10689                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10690
10691         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10692                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
10693                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
10694                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
10695                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
10696                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
10697                      tp->pdev_peer == tp->pdev))
10698                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
10699
10700                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10701                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10702                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10703                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10704                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10705                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10706                 } else {
10707                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
10708                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10709                                 ASIC_REV_5750 &&
10710                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10711                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
10712                 }
10713         }
10714
10715         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10716             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10717             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10718             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10719             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10720             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
10721             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10722                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10723
10724         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10725         if (pcie_cap != 0) {
10726                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10727                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10728                         u16 lnkctl;
10729
10730                         pci_read_config_word(tp->pdev,
10731                                              pcie_cap + PCI_EXP_LNKCTL,
10732                                              &lnkctl);
10733                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10734                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10735                 }
10736         }
10737
10738         /* If we have an AMD 762 or VIA K8T800 chipset, write
10739          * reordering to the mailbox registers done by the host
10740          * controller can cause major troubles.  We read back from
10741          * every mailbox register write to force the writes to be
10742          * posted to the chip in order.
10743          */
10744         if (pci_dev_present(write_reorder_chipsets) &&
10745             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10746                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10747
10748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10749             tp->pci_lat_timer < 64) {
10750                 tp->pci_lat_timer = 64;
10751
10752                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10753                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10754                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10755                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10756
10757                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10758                                        cacheline_sz_reg);
10759         }
10760
10761         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
10762             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10763                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
10764                 if (!tp->pcix_cap) {
10765                         printk(KERN_ERR PFX "Cannot find PCI-X "
10766                                             "capability, aborting.\n");
10767                         return -EIO;
10768                 }
10769         }
10770
10771         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10772                               &pci_state_reg);
10773
10774         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10775                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10776
10777                 /* If this is a 5700 BX chipset, and we are in PCI-X
10778                  * mode, enable register write workaround.
10779                  *
10780                  * The workaround is to use indirect register accesses
10781                  * for all chip writes not to mailbox registers.
10782                  */
10783                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10784                         u32 pm_reg;
10785
10786                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10787
10788                         /* The chip can have it's power management PCI config
10789                          * space registers clobbered due to this bug.
10790                          * So explicitly force the chip into D0 here.
10791                          */
10792                         pci_read_config_dword(tp->pdev,
10793                                               tp->pm_cap + PCI_PM_CTRL,
10794                                               &pm_reg);
10795                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10796                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10797                         pci_write_config_dword(tp->pdev,
10798                                                tp->pm_cap + PCI_PM_CTRL,
10799                                                pm_reg);
10800
10801                         /* Also, force SERR#/PERR# in PCI command. */
10802                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10803                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10804                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10805                 }
10806         }
10807
10808         /* 5700 BX chips need to have their TX producer index mailboxes
10809          * written twice to workaround a bug.
10810          */
10811         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10812                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10813
10814         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10815                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10816         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10817                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10818
10819         /* Chip-specific fixup from Broadcom driver */
10820         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10821             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10822                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10823                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10824         }
10825
10826         /* Default fast path register access methods */
10827         tp->read32 = tg3_read32;
10828         tp->write32 = tg3_write32;
10829         tp->read32_mbox = tg3_read32;
10830         tp->write32_mbox = tg3_write32;
10831         tp->write32_tx_mbox = tg3_write32;
10832         tp->write32_rx_mbox = tg3_write32;
10833
10834         /* Various workaround register access methods */
10835         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10836                 tp->write32 = tg3_write_indirect_reg32;
10837         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10838                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10839                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
10840                 /*
10841                  * Back to back register writes can cause problems on these
10842                  * chips, the workaround is to read back all reg writes
10843                  * except those to mailbox regs.
10844                  *
10845                  * See tg3_write_indirect_reg32().
10846                  */
10847                 tp->write32 = tg3_write_flush_reg32;
10848         }
10849
10850
10851         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10852             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10853                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10854                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10855                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10856         }
10857
10858         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10859                 tp->read32 = tg3_read_indirect_reg32;
10860                 tp->write32 = tg3_write_indirect_reg32;
10861                 tp->read32_mbox = tg3_read_indirect_mbox;
10862                 tp->write32_mbox = tg3_write_indirect_mbox;
10863                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10864                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10865
10866                 iounmap(tp->regs);
10867                 tp->regs = NULL;
10868
10869                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10870                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10871                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10872         }
10873         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10874                 tp->read32_mbox = tg3_read32_mbox_5906;
10875                 tp->write32_mbox = tg3_write32_mbox_5906;
10876                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10877                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10878         }
10879
10880         if (tp->write32 == tg3_write_indirect_reg32 ||
10881             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10882              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10883               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10884                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10885
10886         /* Get eeprom hw config before calling tg3_set_power_state().
10887          * In particular, the TG3_FLG2_IS_NIC flag must be
10888          * determined before calling tg3_set_power_state() so that
10889          * we know whether or not to switch out of Vaux power.
10890          * When the flag is set, it means that GPIO1 is used for eeprom
10891          * write protect and also implies that it is a LOM where GPIOs
10892          * are not used to switch power.
10893          */
10894         tg3_get_eeprom_hw_cfg(tp);
10895
10896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10897                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
10898
10899         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10900          * GPIO1 driven high will bring 5700's external PHY out of reset.
10901          * It is also used as eeprom write protect on LOMs.
10902          */
10903         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10904         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10905             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10906                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10907                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10908         /* Unused GPIO3 must be driven as output on 5752 because there
10909          * are no pull-up resistors on unused GPIO pins.
10910          */
10911         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10912                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10913
10914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10915                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10916
10917         /* Force the chip into D0. */
10918         err = tg3_set_power_state(tp, PCI_D0);
10919         if (err) {
10920                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10921                        pci_name(tp->pdev));
10922                 return err;
10923         }
10924
10925         /* 5700 B0 chips do not support checksumming correctly due
10926          * to hardware bugs.
10927          */
10928         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10929                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10930
10931         /* Derive initial jumbo mode from MTU assigned in
10932          * ether_setup() via the alloc_etherdev() call
10933          */
10934         if (tp->dev->mtu > ETH_DATA_LEN &&
10935             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10936                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10937
10938         /* Determine WakeOnLan speed to use. */
10939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10940             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10941             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10942             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10943                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10944         } else {
10945                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10946         }
10947
10948         /* A few boards don't want Ethernet@WireSpeed phy feature */
10949         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10950             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10951              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10952              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10953             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10954             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10955                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10956
10957         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10958             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10959                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10960         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10961                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10962
10963         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10964                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10965                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10966                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) {
10967                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
10968                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
10969                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10970                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10971                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10972                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10973                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10974         }
10975
10976         tp->coalesce_mode = 0;
10977         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10978             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10979                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10980
10981         /* Initialize MAC MI mode, polling disabled. */
10982         tw32_f(MAC_MI_MODE, tp->mi_mode);
10983         udelay(80);
10984
10985         /* Initialize data/descriptor byte/word swapping. */
10986         val = tr32(GRC_MODE);
10987         val &= GRC_MODE_HOST_STACKUP;
10988         tw32(GRC_MODE, val | tp->grc_mode);
10989
10990         tg3_switch_clocks(tp);
10991
10992         /* Clear this out for sanity. */
10993         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10994
10995         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10996                               &pci_state_reg);
10997         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10998             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10999                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11000
11001                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11002                     chiprevid == CHIPREV_ID_5701_B0 ||
11003                     chiprevid == CHIPREV_ID_5701_B2 ||
11004                     chiprevid == CHIPREV_ID_5701_B5) {
11005                         void __iomem *sram_base;
11006
11007                         /* Write some dummy words into the SRAM status block
11008                          * area, see if it reads back correctly.  If the return
11009                          * value is bad, force enable the PCIX workaround.
11010                          */
11011                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11012
11013                         writel(0x00000000, sram_base);
11014                         writel(0x00000000, sram_base + 4);
11015                         writel(0xffffffff, sram_base + 4);
11016                         if (readl(sram_base) != 0x00000000)
11017                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11018                 }
11019         }
11020
11021         udelay(50);
11022         tg3_nvram_init(tp);
11023
11024         grc_misc_cfg = tr32(GRC_MISC_CFG);
11025         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11026
11027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11028             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11029              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11030                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11031
11032         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11033             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11034                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11035         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11036                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11037                                       HOSTCC_MODE_CLRTICK_TXBD);
11038
11039                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11040                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11041                                        tp->misc_host_ctrl);
11042         }
11043
11044         /* these are limited to 10/100 only */
11045         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11046              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11047             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11048              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11049              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11050               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11051               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11052             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11053              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11054               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11055               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11056             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11057                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11058
11059         err = tg3_phy_probe(tp);
11060         if (err) {
11061                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11062                        pci_name(tp->pdev), err);
11063                 /* ... but do not return immediately ... */
11064         }
11065
11066         tg3_read_partno(tp);
11067         tg3_read_fw_ver(tp);
11068
11069         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11070                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11071         } else {
11072                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11073                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11074                 else
11075                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11076         }
11077
11078         /* 5700 {AX,BX} chips have a broken status block link
11079          * change bit implementation, so we must use the
11080          * status register in those cases.
11081          */
11082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11083                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11084         else
11085                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11086
11087         /* The led_ctrl is set during tg3_phy_probe, here we might
11088          * have to force the link status polling mechanism based
11089          * upon subsystem IDs.
11090          */
11091         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11093             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11094                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11095                                   TG3_FLAG_USE_LINKCHG_REG);
11096         }
11097
11098         /* For all SERDES we poll the MAC status register. */
11099         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11100                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11101         else
11102                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11103
11104         /* All chips before 5787 can get confused if TX buffers
11105          * straddle the 4GB address boundary in some cases.
11106          */
11107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11111                 tp->dev->hard_start_xmit = tg3_start_xmit;
11112         else
11113                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11114
11115         tp->rx_offset = 2;
11116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11117             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11118                 tp->rx_offset = 0;
11119
11120         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11121
11122         /* Increment the rx prod index on the rx std ring by at most
11123          * 8 for these chips to workaround hw errata.
11124          */
11125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11126             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11127             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11128                 tp->rx_std_max_post = 8;
11129
11130         /* By default, disable wake-on-lan.  User can change this
11131          * using ETHTOOL_SWOL.
11132          */
11133         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
11134
11135         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11136                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11137                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11138
11139         return err;
11140 }
11141
11142 #ifdef CONFIG_SPARC
11143 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11144 {
11145         struct net_device *dev = tp->dev;
11146         struct pci_dev *pdev = tp->pdev;
11147         struct device_node *dp = pci_device_to_OF_node(pdev);
11148         const unsigned char *addr;
11149         int len;
11150
11151         addr = of_get_property(dp, "local-mac-address", &len);
11152         if (addr && len == 6) {
11153                 memcpy(dev->dev_addr, addr, 6);
11154                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11155                 return 0;
11156         }
11157         return -ENODEV;
11158 }
11159
11160 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11161 {
11162         struct net_device *dev = tp->dev;
11163
11164         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11165         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11166         return 0;
11167 }
11168 #endif
11169
11170 static int __devinit tg3_get_device_address(struct tg3 *tp)
11171 {
11172         struct net_device *dev = tp->dev;
11173         u32 hi, lo, mac_offset;
11174         int addr_ok = 0;
11175
11176 #ifdef CONFIG_SPARC
11177         if (!tg3_get_macaddr_sparc(tp))
11178                 return 0;
11179 #endif
11180
11181         mac_offset = 0x7c;
11182         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11183             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11184                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11185                         mac_offset = 0xcc;
11186                 if (tg3_nvram_lock(tp))
11187                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11188                 else
11189                         tg3_nvram_unlock(tp);
11190         }
11191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11192                 mac_offset = 0x10;
11193
11194         /* First try to get it from MAC address mailbox. */
11195         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11196         if ((hi >> 16) == 0x484b) {
11197                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11198                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11199
11200                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11201                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11202                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11203                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11204                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11205
11206                 /* Some old bootcode may report a 0 MAC address in SRAM */
11207                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11208         }
11209         if (!addr_ok) {
11210                 /* Next, try NVRAM. */
11211                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11212                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11213                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11214                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11215                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11216                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11217                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11218                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11219                 }
11220                 /* Finally just fetch it out of the MAC control regs. */
11221                 else {
11222                         hi = tr32(MAC_ADDR_0_HIGH);
11223                         lo = tr32(MAC_ADDR_0_LOW);
11224
11225                         dev->dev_addr[5] = lo & 0xff;
11226                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11227                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11228                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11229                         dev->dev_addr[1] = hi & 0xff;
11230                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11231                 }
11232         }
11233
11234         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11235 #ifdef CONFIG_SPARC64
11236                 if (!tg3_get_default_macaddr_sparc(tp))
11237                         return 0;
11238 #endif
11239                 return -EINVAL;
11240         }
11241         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11242         return 0;
11243 }
11244
11245 #define BOUNDARY_SINGLE_CACHELINE       1
11246 #define BOUNDARY_MULTI_CACHELINE        2
11247
11248 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11249 {
11250         int cacheline_size;
11251         u8 byte;
11252         int goal;
11253
11254         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11255         if (byte == 0)
11256                 cacheline_size = 1024;
11257         else
11258                 cacheline_size = (int) byte * 4;
11259
11260         /* On 5703 and later chips, the boundary bits have no
11261          * effect.
11262          */
11263         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11264             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11265             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11266                 goto out;
11267
11268 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11269         goal = BOUNDARY_MULTI_CACHELINE;
11270 #else
11271 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11272         goal = BOUNDARY_SINGLE_CACHELINE;
11273 #else
11274         goal = 0;
11275 #endif
11276 #endif
11277
11278         if (!goal)
11279                 goto out;
11280
11281         /* PCI controllers on most RISC systems tend to disconnect
11282          * when a device tries to burst across a cache-line boundary.
11283          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11284          *
11285          * Unfortunately, for PCI-E there are only limited
11286          * write-side controls for this, and thus for reads
11287          * we will still get the disconnects.  We'll also waste
11288          * these PCI cycles for both read and write for chips
11289          * other than 5700 and 5701 which do not implement the
11290          * boundary bits.
11291          */
11292         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11293             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11294                 switch (cacheline_size) {
11295                 case 16:
11296                 case 32:
11297                 case 64:
11298                 case 128:
11299                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11300                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11301                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11302                         } else {
11303                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11304                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11305                         }
11306                         break;
11307
11308                 case 256:
11309                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11310                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11311                         break;
11312
11313                 default:
11314                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11315                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11316                         break;
11317                 };
11318         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11319                 switch (cacheline_size) {
11320                 case 16:
11321                 case 32:
11322                 case 64:
11323                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11324                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11325                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11326                                 break;
11327                         }
11328                         /* fallthrough */
11329                 case 128:
11330                 default:
11331                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11332                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11333                         break;
11334                 };
11335         } else {
11336                 switch (cacheline_size) {
11337                 case 16:
11338                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11339                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11340                                         DMA_RWCTRL_WRITE_BNDRY_16);
11341                                 break;
11342                         }
11343                         /* fallthrough */
11344                 case 32:
11345                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11346                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11347                                         DMA_RWCTRL_WRITE_BNDRY_32);
11348                                 break;
11349                         }
11350                         /* fallthrough */
11351                 case 64:
11352                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11353                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11354                                         DMA_RWCTRL_WRITE_BNDRY_64);
11355                                 break;
11356                         }
11357                         /* fallthrough */
11358                 case 128:
11359                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11360                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11361                                         DMA_RWCTRL_WRITE_BNDRY_128);
11362                                 break;
11363                         }
11364                         /* fallthrough */
11365                 case 256:
11366                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11367                                 DMA_RWCTRL_WRITE_BNDRY_256);
11368                         break;
11369                 case 512:
11370                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11371                                 DMA_RWCTRL_WRITE_BNDRY_512);
11372                         break;
11373                 case 1024:
11374                 default:
11375                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11376                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11377                         break;
11378                 };
11379         }
11380
11381 out:
11382         return val;
11383 }
11384
/* Run one test DMA transaction between the host buffer at @buf_dma and
 * NIC SRAM, using the chip's internal read (host->device) or write
 * (device->host) DMA engine, then poll the corresponding completion
 * FIFO for the descriptor to come back.
 *
 * @tp:        device state
 * @buf:       host-virtual address of the test buffer (unused here;
 *             callers fill/verify it around this call)
 * @buf_dma:   bus address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: nonzero = host-to-device (read DMA engine),
 *             zero = device-to-host (write DMA engine)
 *
 * Returns 0 when the descriptor appears on the completion FIFO within
 * ~4ms (40 polls x 100us), -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	/* Place the test descriptor in the on-chip DMA descriptor pool. */
	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear stale completion-FIFO and DMA-engine status before the
	 * test run.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor points at the host test buffer.  0x00002100 is the
	 * NIC-local mbuf address used for the transfer (same address the
	 * caller reads back via tg3_read_mem in its verification pass).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* NOTE(review): cqid_sqid packs completion-queue and
		 * send-queue ids for the read DMA engine; exact encoding
		 * is Broadcom-internal — values taken as given.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	/* NOTE(review): descriptor flags value is Broadcom-internal. */
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word-by-word through the
	 * indirect config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	/* Restore the memory window base for sanity. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the DMA by enqueueing the descriptor address on the
	 * appropriate engine's work FIFO.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO until our descriptor shows up or we
	 * time out (40 x 100us).
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11465
11466 #define TEST_BUFFER_SIZE        0x2000
11467
11468 static int __devinit tg3_test_dma(struct tg3 *tp)
11469 {
11470         dma_addr_t buf_dma;
11471         u32 *buf, saved_dma_rwctrl;
11472         int ret;
11473
11474         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11475         if (!buf) {
11476                 ret = -ENOMEM;
11477                 goto out_nofree;
11478         }
11479
11480         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11481                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11482
11483         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11484
11485         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11486                 /* DMA read watermark not used on PCIE */
11487                 tp->dma_rwctrl |= 0x00180000;
11488         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11489                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11490                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11491                         tp->dma_rwctrl |= 0x003f0000;
11492                 else
11493                         tp->dma_rwctrl |= 0x003f000f;
11494         } else {
11495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11496                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11497                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11498                         u32 read_water = 0x7;
11499
11500                         /* If the 5704 is behind the EPB bridge, we can
11501                          * do the less restrictive ONE_DMA workaround for
11502                          * better performance.
11503                          */
11504                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11505                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11506                                 tp->dma_rwctrl |= 0x8000;
11507                         else if (ccval == 0x6 || ccval == 0x7)
11508                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11509
11510                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11511                                 read_water = 4;
11512                         /* Set bit 23 to enable PCIX hw bug fix */
11513                         tp->dma_rwctrl |=
11514                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11515                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11516                                 (1 << 23);
11517                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11518                         /* 5780 always in PCIX mode */
11519                         tp->dma_rwctrl |= 0x00144000;
11520                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11521                         /* 5714 always in PCIX mode */
11522                         tp->dma_rwctrl |= 0x00148000;
11523                 } else {
11524                         tp->dma_rwctrl |= 0x001b000f;
11525                 }
11526         }
11527
11528         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11529             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11530                 tp->dma_rwctrl &= 0xfffffff0;
11531
11532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11534                 /* Remove this if it causes problems for some boards. */
11535                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11536
11537                 /* On 5700/5701 chips, we need to set this bit.
11538                  * Otherwise the chip will issue cacheline transactions
11539                  * to streamable DMA memory with not all the byte
11540                  * enables turned on.  This is an error on several
11541                  * RISC PCI controllers, in particular sparc64.
11542                  *
11543                  * On 5703/5704 chips, this bit has been reassigned
11544                  * a different meaning.  In particular, it is used
11545                  * on those chips to enable a PCI-X workaround.
11546                  */
11547                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11548         }
11549
11550         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11551
11552 #if 0
11553         /* Unneeded, already done by tg3_get_invariants.  */
11554         tg3_switch_clocks(tp);
11555 #endif
11556
11557         ret = 0;
11558         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11559             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11560                 goto out;
11561
11562         /* It is best to perform DMA test with maximum write burst size
11563          * to expose the 5700/5701 write DMA bug.
11564          */
11565         saved_dma_rwctrl = tp->dma_rwctrl;
11566         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11567         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11568
11569         while (1) {
11570                 u32 *p = buf, i;
11571
11572                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11573                         p[i] = i;
11574
11575                 /* Send the buffer to the chip. */
11576                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11577                 if (ret) {
11578                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11579                         break;
11580                 }
11581
11582 #if 0
11583                 /* validate data reached card RAM correctly. */
11584                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11585                         u32 val;
11586                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11587                         if (le32_to_cpu(val) != p[i]) {
11588                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11589                                 /* ret = -ENODEV here? */
11590                         }
11591                         p[i] = 0;
11592                 }
11593 #endif
11594                 /* Now read it back. */
11595                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11596                 if (ret) {
11597                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11598
11599                         break;
11600                 }
11601
11602                 /* Verify it. */
11603                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11604                         if (p[i] == i)
11605                                 continue;
11606
11607                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11608                             DMA_RWCTRL_WRITE_BNDRY_16) {
11609                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11610                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11611                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11612                                 break;
11613                         } else {
11614                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11615                                 ret = -ENODEV;
11616                                 goto out;
11617                         }
11618                 }
11619
11620                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11621                         /* Success. */
11622                         ret = 0;
11623                         break;
11624                 }
11625         }
11626         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11627             DMA_RWCTRL_WRITE_BNDRY_16) {
11628                 static struct pci_device_id dma_wait_state_chipsets[] = {
11629                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11630                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11631                         { },
11632                 };
11633
11634                 /* DMA test passed without adjusting DMA boundary,
11635                  * now look for chipsets that are known to expose the
11636                  * DMA bug without failing the test.
11637                  */
11638                 if (pci_dev_present(dma_wait_state_chipsets)) {
11639                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11640                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11641                 }
11642                 else
11643                         /* Safe to use the calculated DMA boundary. */
11644                         tp->dma_rwctrl = saved_dma_rwctrl;
11645
11646                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11647         }
11648
11649 out:
11650         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11651 out_nofree:
11652         return ret;
11653 }
11654
11655 static void __devinit tg3_init_link_config(struct tg3 *tp)
11656 {
11657         tp->link_config.advertising =
11658                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11659                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11660                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11661                  ADVERTISED_Autoneg | ADVERTISED_MII);
11662         tp->link_config.speed = SPEED_INVALID;
11663         tp->link_config.duplex = DUPLEX_INVALID;
11664         tp->link_config.autoneg = AUTONEG_ENABLE;
11665         tp->link_config.active_speed = SPEED_INVALID;
11666         tp->link_config.active_duplex = DUPLEX_INVALID;
11667         tp->link_config.phy_is_low_power = 0;
11668         tp->link_config.orig_speed = SPEED_INVALID;
11669         tp->link_config.orig_duplex = DUPLEX_INVALID;
11670         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11671 }
11672
11673 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11674 {
11675         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11676                 tp->bufmgr_config.mbuf_read_dma_low_water =
11677                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11678                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11679                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11680                 tp->bufmgr_config.mbuf_high_water =
11681                         DEFAULT_MB_HIGH_WATER_5705;
11682                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11683                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11684                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11685                         tp->bufmgr_config.mbuf_high_water =
11686                                 DEFAULT_MB_HIGH_WATER_5906;
11687                 }
11688
11689                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11690                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11691                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11692                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11693                 tp->bufmgr_config.mbuf_high_water_jumbo =
11694                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11695         } else {
11696                 tp->bufmgr_config.mbuf_read_dma_low_water =
11697                         DEFAULT_MB_RDMA_LOW_WATER;
11698                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11699                         DEFAULT_MB_MACRX_LOW_WATER;
11700                 tp->bufmgr_config.mbuf_high_water =
11701                         DEFAULT_MB_HIGH_WATER;
11702
11703                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11704                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11705                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11706                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11707                 tp->bufmgr_config.mbuf_high_water_jumbo =
11708                         DEFAULT_MB_HIGH_WATER_JUMBO;
11709         }
11710
11711         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11712         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11713 }
11714
11715 static char * __devinit tg3_phy_string(struct tg3 *tp)
11716 {
11717         switch (tp->phy_id & PHY_ID_MASK) {
11718         case PHY_ID_BCM5400:    return "5400";
11719         case PHY_ID_BCM5401:    return "5401";
11720         case PHY_ID_BCM5411:    return "5411";
11721         case PHY_ID_BCM5701:    return "5701";
11722         case PHY_ID_BCM5703:    return "5703";
11723         case PHY_ID_BCM5704:    return "5704";
11724         case PHY_ID_BCM5705:    return "5705";
11725         case PHY_ID_BCM5750:    return "5750";
11726         case PHY_ID_BCM5752:    return "5752";
11727         case PHY_ID_BCM5714:    return "5714";
11728         case PHY_ID_BCM5780:    return "5780";
11729         case PHY_ID_BCM5755:    return "5755";
11730         case PHY_ID_BCM5787:    return "5787";
11731         case PHY_ID_BCM5784:    return "5784";
11732         case PHY_ID_BCM5756:    return "5722/5756";
11733         case PHY_ID_BCM5906:    return "5906";
11734         case PHY_ID_BCM8002:    return "8002/serdes";
11735         case 0:                 return "serdes";
11736         default:                return "unknown";
11737         };
11738 }
11739
11740 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11741 {
11742         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11743                 strcpy(str, "PCI Express");
11744                 return str;
11745         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11746                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11747
11748                 strcpy(str, "PCIX:");
11749
11750                 if ((clock_ctrl == 7) ||
11751                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11752                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11753                         strcat(str, "133MHz");
11754                 else if (clock_ctrl == 0)
11755                         strcat(str, "33MHz");
11756                 else if (clock_ctrl == 2)
11757                         strcat(str, "50MHz");
11758                 else if (clock_ctrl == 4)
11759                         strcat(str, "66MHz");
11760                 else if (clock_ctrl == 6)
11761                         strcat(str, "100MHz");
11762         } else {
11763                 strcpy(str, "PCI:");
11764                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11765                         strcat(str, "66MHz");
11766                 else
11767                         strcat(str, "33MHz");
11768         }
11769         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11770                 strcat(str, ":32-bit");
11771         else
11772                 strcat(str, ":64-bit");
11773         return str;
11774 }
11775
11776 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11777 {
11778         struct pci_dev *peer;
11779         unsigned int func, devnr = tp->pdev->devfn & ~7;
11780
11781         for (func = 0; func < 8; func++) {
11782                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11783                 if (peer && peer != tp->pdev)
11784                         break;
11785                 pci_dev_put(peer);
11786         }
11787         /* 5704 can be configured in single-port mode, set peer to
11788          * tp->pdev in that case.
11789          */
11790         if (!peer) {
11791                 peer = tp->pdev;
11792                 return peer;
11793         }
11794
11795         /*
11796          * We don't need to keep the refcount elevated; there's no way
11797          * to remove one half of this device without removing the other
11798          */
11799         pci_dev_put(peer);
11800
11801         return peer;
11802 }
11803
11804 static void __devinit tg3_init_coal(struct tg3 *tp)
11805 {
11806         struct ethtool_coalesce *ec = &tp->coal;
11807
11808         memset(ec, 0, sizeof(*ec));
11809         ec->cmd = ETHTOOL_GCOALESCE;
11810         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11811         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11812         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11813         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11814         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11815         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11816         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11817         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11818         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11819
11820         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11821                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11822                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11823                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11824                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11825                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11826         }
11827
11828         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11829                 ec->rx_coalesce_usecs_irq = 0;
11830                 ec->tx_coalesce_usecs_irq = 0;
11831                 ec->stats_block_coalesce_usecs = 0;
11832         }
11833 }
11834
/*
 * tg3_init_one - PCI probe entry point.
 *
 * Enables the PCI device, maps its register BAR, allocates the
 * net_device with embedded tg3 private state, fetches chip invariants,
 * chooses DMA masks, runs the DMA engine self-test and finally
 * registers the network interface.  Returns 0 on success or a negative
 * errno; on failure all acquired resources are unwound through the
 * err_out_* label chain.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner only once, for the first device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the private state embedded in the net_device. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	/* Honor the module's debug parameter when set. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wider mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware-TSO chips are always capable;
	 * several older revisions (and ASF-enabled configs) cannot do
	 * TSO at all; the remainder use firmware TSO with a known bug
	 * flagged via TG3_FLG2_TSO_BUG.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
	}


	/* 5705_A1 on a slow bus without TSO gets a shorter RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Probe banner: part number, chip/PHY revision, bus and media. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")));

	/* Continue the banner with the colon-separated MAC address. */
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: each label releases everything acquired above it. */
err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12145
12146 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12147 {
12148         struct net_device *dev = pci_get_drvdata(pdev);
12149
12150         if (dev) {
12151                 struct tg3 *tp = netdev_priv(dev);
12152
12153                 flush_scheduled_work();
12154                 unregister_netdev(dev);
12155                 if (tp->regs) {
12156                         iounmap(tp->regs);
12157                         tp->regs = NULL;
12158                 }
12159                 free_netdev(dev);
12160                 pci_release_regions(pdev);
12161                 pci_disable_device(pdev);
12162                 pci_set_drvdata(pdev, NULL);
12163         }
12164 }
12165
/*
 * Power-management suspend hook.  Saves PCI config space, and if the
 * interface is running, stops traffic, halts the chip and enters the
 * requested low-power state.  If entering low power fails, the
 * hardware is restarted so the interface remains usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Let any queued driver work finish before stopping the NIC. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Low-power entry failed: bring the hardware and the
		 * interface back up so the device keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12217
/*
 * Power-management resume hook.  Restores PCI config space, returns
 * the chip to D0 and, if the interface was running at suspend time,
 * re-initializes the hardware and restarts traffic.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Restart the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12257
/* PCI driver glue: probe/remove and power-management entry points. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);