drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.27"
65 #define DRV_MODULE_RELDATE      "May 5, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
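/* TX_RING_GAP is the part of the TX ring left unused when tx_pending is
 * smaller than the ring size; TX_BUFFS_AVAIL is the number of descriptors
 * still free for the driver, and NEXT_TX advances a ring index using the
 * '& (size - 1)' form mentioned above.
 */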
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
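/* tg3_write_indirect_reg32
 *  register write behind the tw32() macro: chips with the PCI-X target
 *  hardware bug are written indirectly through PCI config space under
 *  indirect_lock, everything else uses a direct MMIO write with a
 *  read-back on 5701 to work around its register write bug
 */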
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
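/* _tw32_rx_mbox / _tw32_tx_mbox
 *  mailbox writes: the TX variant writes the value twice to work around
 *  the TXD mailbox hardware bug, and both read the mailbox back when the
 *  chipset may reorder mailbox writes
 */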
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
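/* tg3_write_mem / tg3_read_mem
 *  access NIC on-board SRAM through the PCI memory window config
 *  registers; indirect_lock serializes use of the shared window and the
 *  window base is always returned to zero afterwards
 */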
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
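/* tg3_disable_ints
 *  mask the chip's PCI interrupt in MISC_HOST_CTRL and write a non-zero
 *  value to interrupt mailbox 0 so no further interrupts are generated;
 *  the mailbox read flushes the posted writes
 */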
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
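/* tg3_has_work
 *  returns non-zero when the status block indicates a link change we
 *  need to service or new TX completions / RX packets to process
 */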
429 static inline unsigned int tg3_has_work(struct tg3 *tp)
430 {
431         struct tg3_hw_status *sblk = tp->hw_status;
432         unsigned int work_exists = 0;
433
434         /* check for phy events */
435         if (!(tp->tg3_flags &
436               (TG3_FLAG_USE_LINKCHG_REG |
437                TG3_FLAG_POLL_SERDES))) {
438                 if (sblk->status & SD_STATUS_LINK_CHG)
439                         work_exists = 1;
440         }
441         /* check for RX/TX work to do */
442         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
443             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
444                 work_exists = 1;
445
446         return work_exists;
447 }
448
449 /* tg3_restart_ints
450  *  similar to tg3_enable_ints, but it accurately determines whether there
451  *  is new work pending and can return without flushing the PIO write
452  *  which reenables interrupts 
453  */
454 static void tg3_restart_ints(struct tg3 *tp)
455 {
456         tw32(TG3PCI_MISC_HOST_CTRL,
457                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
458         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
459         mmiowb();
460
461         if (tg3_has_work(tp))
462                 tw32(HOSTCC_MODE, tp->coalesce_mode |
463                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
464 }
465
466 static inline void tg3_netif_stop(struct tg3 *tp)
467 {
468         netif_poll_disable(tp->dev);
469         netif_tx_disable(tp->dev);
470 }
471
472 static inline void tg3_netif_start(struct tg3 *tp)
473 {
474         netif_wake_queue(tp->dev);
475         /* NOTE: unconditional netif_wake_queue is only appropriate
476          * so long as all callers are assured to have free tx slots
477          * (such as after tg3_init_hw)
478          */
479         netif_poll_enable(tp->dev);
480         tg3_cond_int(tp);
481 }
482
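/* tg3_switch_clocks
 *  step the chip back from the alternate (power saving) core clock to
 *  the normal core clock, caching the resulting CLOCK_CTRL value in
 *  tp->pci_clock_ctrl
 */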
483 static void tg3_switch_clocks(struct tg3 *tp)
484 {
485         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
486         u32 orig_clock_ctrl;
487
488         orig_clock_ctrl = clock_ctrl;
489         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
490                        CLOCK_CTRL_CLKRUN_OENABLE |
491                        0x1f);
492         tp->pci_clock_ctrl = clock_ctrl;
493
494         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
495                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
496                         tw32_f(TG3PCI_CLOCK_CTRL,
497                                clock_ctrl | CLOCK_CTRL_625_CORE);
498                         udelay(40);
499                 }
500         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
501                 tw32_f(TG3PCI_CLOCK_CTRL,
502                      clock_ctrl |
503                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
504                 udelay(40);
505                 tw32_f(TG3PCI_CLOCK_CTRL,
506                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
507                 udelay(40);
508         }
509         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
510         udelay(40);
511 }
512
513 #define PHY_BUSY_LOOPS  5000
514
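/* tg3_readphy / tg3_writephy
 *  MII register access through MAC_MI_COM; hardware auto-polling is
 *  temporarily turned off around the transaction and -EBUSY is returned
 *  if the busy bit never clears
 */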
515 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
516 {
517         u32 frame_val;
518         unsigned int loops;
519         int ret;
520
521         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
522                 tw32_f(MAC_MI_MODE,
523                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
524                 udelay(80);
525         }
526
527         *val = 0x0;
528
529         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
530                       MI_COM_PHY_ADDR_MASK);
531         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
532                       MI_COM_REG_ADDR_MASK);
533         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
534         
535         tw32_f(MAC_MI_COM, frame_val);
536
537         loops = PHY_BUSY_LOOPS;
538         while (loops != 0) {
539                 udelay(10);
540                 frame_val = tr32(MAC_MI_COM);
541
542                 if ((frame_val & MI_COM_BUSY) == 0) {
543                         udelay(5);
544                         frame_val = tr32(MAC_MI_COM);
545                         break;
546                 }
547                 loops -= 1;
548         }
549
550         ret = -EBUSY;
551         if (loops != 0) {
552                 *val = frame_val & MI_COM_DATA_MASK;
553                 ret = 0;
554         }
555
556         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
557                 tw32_f(MAC_MI_MODE, tp->mi_mode);
558                 udelay(80);
559         }
560
561         return ret;
562 }
563
564 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
565 {
566         u32 frame_val;
567         unsigned int loops;
568         int ret;
569
570         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
571                 tw32_f(MAC_MI_MODE,
572                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
573                 udelay(80);
574         }
575
576         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
577                       MI_COM_PHY_ADDR_MASK);
578         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
579                       MI_COM_REG_ADDR_MASK);
580         frame_val |= (val & MI_COM_DATA_MASK);
581         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
582         
583         tw32_f(MAC_MI_COM, frame_val);
584
585         loops = PHY_BUSY_LOOPS;
586         while (loops != 0) {
587                 udelay(10);
588                 frame_val = tr32(MAC_MI_COM);
589                 if ((frame_val & MI_COM_BUSY) == 0) {
590                         udelay(5);
591                         frame_val = tr32(MAC_MI_COM);
592                         break;
593                 }
594                 loops -= 1;
595         }
596
597         ret = -EBUSY;
598         if (loops != 0)
599                 ret = 0;
600
601         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
602                 tw32_f(MAC_MI_MODE, tp->mi_mode);
603                 udelay(80);
604         }
605
606         return ret;
607 }
608
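/* tg3_phy_set_wirespeed
 *  enable the PHY's ethernet@wirespeed feature (downshift to a lower
 *  speed when a gigabit link cannot be established) unless the chip
 *  configuration disables it
 */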
609 static void tg3_phy_set_wirespeed(struct tg3 *tp)
610 {
611         u32 val;
612
613         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
614                 return;
615
616         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
617             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
618                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
619                              (val | (1 << 15) | (1 << 4)));
620 }
621
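/* tg3_bmcr_reset
 *  issue a PHY reset through BMCR and poll until the self-clearing
 *  reset bit drops, returning -EBUSY on timeout
 */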
622 static int tg3_bmcr_reset(struct tg3 *tp)
623 {
624         u32 phy_control;
625         int limit, err;
626
627         /* OK, reset it, and poll the BMCR_RESET bit until it
628          * clears or we time out.
629          */
630         phy_control = BMCR_RESET;
631         err = tg3_writephy(tp, MII_BMCR, phy_control);
632         if (err != 0)
633                 return -EBUSY;
634
635         limit = 5000;
636         while (limit--) {
637                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
638                 if (err != 0)
639                         return -EBUSY;
640
641                 if ((phy_control & BMCR_RESET) == 0) {
642                         udelay(40);
643                         break;
644                 }
645                 udelay(10);
646         }
647         if (limit < 0)
648                 return -EBUSY;
649
650         return 0;
651 }
652
653 static int tg3_wait_macro_done(struct tg3 *tp)
654 {
655         int limit = 100;
656
657         while (limit--) {
658                 u32 tmp32;
659
660                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
661                         if ((tmp32 & 0x1000) == 0)
662                                 break;
663                 }
664         }
665         if (limit < 0)
666                 return -EBUSY;
667
668         return 0;
669 }
670
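/* tg3_phy_write_and_check_testpat
 *  part of the 5703/4/5 PHY workaround: write a known test pattern to
 *  each of the four DSP channels, read it back, and return -EBUSY on a
 *  mismatch or macro timeout (setting *resetp when another PHY reset is
 *  wanted before retrying)
 */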
671 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
672 {
673         static const u32 test_pat[4][6] = {
674         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
675         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
676         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
677         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
678         };
679         int chan;
680
681         for (chan = 0; chan < 4; chan++) {
682                 int i;
683
684                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
685                              (chan * 0x2000) | 0x0200);
686                 tg3_writephy(tp, 0x16, 0x0002);
687
688                 for (i = 0; i < 6; i++)
689                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
690                                      test_pat[chan][i]);
691
692                 tg3_writephy(tp, 0x16, 0x0202);
693                 if (tg3_wait_macro_done(tp)) {
694                         *resetp = 1;
695                         return -EBUSY;
696                 }
697
698                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
699                              (chan * 0x2000) | 0x0200);
700                 tg3_writephy(tp, 0x16, 0x0082);
701                 if (tg3_wait_macro_done(tp)) {
702                         *resetp = 1;
703                         return -EBUSY;
704                 }
705
706                 tg3_writephy(tp, 0x16, 0x0802);
707                 if (tg3_wait_macro_done(tp)) {
708                         *resetp = 1;
709                         return -EBUSY;
710                 }
711
712                 for (i = 0; i < 6; i += 2) {
713                         u32 low, high;
714
715                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
716                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
717                             tg3_wait_macro_done(tp)) {
718                                 *resetp = 1;
719                                 return -EBUSY;
720                         }
721                         low &= 0x7fff;
722                         high &= 0x000f;
723                         if (low != test_pat[chan][i] ||
724                             high != test_pat[chan][i+1]) {
725                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
726                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
727                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
728
729                                 return -EBUSY;
730                         }
731                 }
732         }
733
734         return 0;
735 }
736
737 static int tg3_phy_reset_chanpat(struct tg3 *tp)
738 {
739         int chan;
740
741         for (chan = 0; chan < 4; chan++) {
742                 int i;
743
744                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
745                              (chan * 0x2000) | 0x0200);
746                 tg3_writephy(tp, 0x16, 0x0002);
747                 for (i = 0; i < 6; i++)
748                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
749                 tg3_writephy(tp, 0x16, 0x0202);
750                 if (tg3_wait_macro_done(tp))
751                         return -EBUSY;
752         }
753
754         return 0;
755 }
756
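/* tg3_phy_reset_5703_4_5
 *  extended PHY reset for 5703/5704/5705: reset the PHY, retrain the
 *  DSP with test patterns (retrying up to 10 times), then restore the
 *  transmitter, interrupt and master-mode settings
 */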
757 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
758 {
759         u32 reg32, phy9_orig;
760         int retries, do_phy_reset, err;
761
762         retries = 10;
763         do_phy_reset = 1;
764         do {
765                 if (do_phy_reset) {
766                         err = tg3_bmcr_reset(tp);
767                         if (err)
768                                 return err;
769                         do_phy_reset = 0;
770                 }
771
772                 /* Disable transmitter and interrupt.  */
773                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
774                         continue;
775
776                 reg32 |= 0x3000;
777                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
778
779                 /* Set full-duplex, 1000 mbps.  */
780                 tg3_writephy(tp, MII_BMCR,
781                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
782
783                 /* Set to master mode.  */
784                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
785                         continue;
786
787                 tg3_writephy(tp, MII_TG3_CTRL,
788                              (MII_TG3_CTRL_AS_MASTER |
789                               MII_TG3_CTRL_ENABLE_AS_MASTER));
790
791                 /* Enable SM_DSP_CLOCK and 6dB.  */
792                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
793
794                 /* Block the PHY control access.  */
795                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
796                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
797
798                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
799                 if (!err)
800                         break;
801         } while (--retries);
802
803         err = tg3_phy_reset_chanpat(tp);
804         if (err)
805                 return err;
806
807         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
808         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
809
810         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
811         tg3_writephy(tp, 0x16, 0x0000);
812
813         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
814             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
815                 /* Set Extended packet length bit for jumbo frames */
816                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
817         }
818         else {
819                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
820         }
821
822         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
823
824         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
825                 reg32 &= ~0x3000;
826                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
827         } else if (!err)
828                 err = -EBUSY;
829
830         return err;
831 }
832
833 /* This will always reset the tigon3 PHY and then apply any chip
834  * specific workarounds that are needed after the reset.
835  */
836 static int tg3_phy_reset(struct tg3 *tp)
837 {
838         u32 phy_status;
839         int err;
840
841         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
842         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
843         if (err != 0)
844                 return -EBUSY;
845
846         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
848             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
849                 err = tg3_phy_reset_5703_4_5(tp);
850                 if (err)
851                         return err;
852                 goto out;
853         }
854
855         err = tg3_bmcr_reset(tp);
856         if (err)
857                 return err;
858
859 out:
860         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
861                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
862                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
863                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
864                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
865                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
866                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
867         }
868         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
869                 tg3_writephy(tp, 0x1c, 0x8d68);
870                 tg3_writephy(tp, 0x1c, 0x8d68);
871         }
872         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
873                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
874                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
875                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
876                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
877                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
878                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
879                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
880                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
881         }
882         /* Set Extended packet length bit (bit 14) on all chips
883          * that support jumbo frames. */
884         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
885                 /* Cannot do read-modify-write on 5401 */
886                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
887         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
888                 u32 phy_reg;
889
890                 /* Set bit 14 with read-modify-write to preserve other bits */
891                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
892                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
893                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
894         }
895
896         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
897          * jumbo frames transmission.
898          */
899         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
900                 u32 phy_reg;
901
902                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
903                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
904                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
905         }
906
907         tg3_phy_set_wirespeed(tp);
908         return 0;
909 }
910
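/* tg3_frob_aux_power
 *  adjust the GPIO lines that control auxiliary (Vaux) power, taking the
 *  peer port of a 5704 into account, depending on whether either port
 *  has Wake-on-LAN enabled
 */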
911 static void tg3_frob_aux_power(struct tg3 *tp)
912 {
913         struct tg3 *tp_peer = tp;
914
915         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
916                 return;
917
918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
919                 tp_peer = pci_get_drvdata(tp->pdev_peer);
920                 if (!tp_peer)
921                         BUG();
922         }
923
924
925         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
926             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
927                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
928                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
929                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
930                              (GRC_LCLCTRL_GPIO_OE0 |
931                               GRC_LCLCTRL_GPIO_OE1 |
932                               GRC_LCLCTRL_GPIO_OE2 |
933                               GRC_LCLCTRL_GPIO_OUTPUT0 |
934                               GRC_LCLCTRL_GPIO_OUTPUT1));
935                         udelay(100);
936                 } else {
937                         u32 no_gpio2;
938                         u32 grc_local_ctrl;
939
940                         if (tp_peer != tp &&
941                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
942                                 return;
943
944                         /* On 5753 and variants, GPIO2 cannot be used. */
945                         no_gpio2 = tp->nic_sram_data_cfg &
946                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
947
948                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
949                                          GRC_LCLCTRL_GPIO_OE1 |
950                                          GRC_LCLCTRL_GPIO_OE2 |
951                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
952                                          GRC_LCLCTRL_GPIO_OUTPUT2;
953                         if (no_gpio2) {
954                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
955                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
956                         }
957                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958                                                 grc_local_ctrl);
959                         udelay(100);
960
961                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                                                 grc_local_ctrl);
965                         udelay(100);
966
967                         if (!no_gpio2) {
968                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
969                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
970                                        grc_local_ctrl);
971                                 udelay(100);
972                         }
973                 }
974         } else {
975                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
976                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
977                         if (tp_peer != tp &&
978                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
979                                 return;
980
981                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
982                              (GRC_LCLCTRL_GPIO_OE1 |
983                               GRC_LCLCTRL_GPIO_OUTPUT1));
984                         udelay(100);
985
986                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
987                              (GRC_LCLCTRL_GPIO_OE1));
988                         udelay(100);
989
990                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
991                              (GRC_LCLCTRL_GPIO_OE1 |
992                               GRC_LCLCTRL_GPIO_OUTPUT1));
993                         udelay(100);
994                 }
995         }
996 }
997
998 static int tg3_setup_phy(struct tg3 *, int);
999
1000 #define RESET_KIND_SHUTDOWN     0
1001 #define RESET_KIND_INIT         1
1002 #define RESET_KIND_SUSPEND      2
1003
1004 static void tg3_write_sig_post_reset(struct tg3 *, int);
1005 static int tg3_halt_cpu(struct tg3 *, u32);
1006
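/* tg3_set_power_state
 *  transition the NIC between PCI power states; for the low power states
 *  this drops a copper PHY to 10Mb autoneg, configures the MAC for
 *  Wake-on-LAN when enabled, switches to the slower clocks and finally
 *  writes the new state into the PCI PM control register
 */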
1007 static int tg3_set_power_state(struct tg3 *tp, int state)
1008 {
1009         u32 misc_host_ctrl;
1010         u16 power_control, power_caps;
1011         int pm = tp->pm_cap;
1012
1013         /* Make sure register accesses (indirect or otherwise)
1014          * will function correctly.
1015          */
1016         pci_write_config_dword(tp->pdev,
1017                                TG3PCI_MISC_HOST_CTRL,
1018                                tp->misc_host_ctrl);
1019
1020         pci_read_config_word(tp->pdev,
1021                              pm + PCI_PM_CTRL,
1022                              &power_control);
1023         power_control |= PCI_PM_CTRL_PME_STATUS;
1024         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1025         switch (state) {
1026         case 0:
1027                 power_control |= 0;
1028                 pci_write_config_word(tp->pdev,
1029                                       pm + PCI_PM_CTRL,
1030                                       power_control);
1031                 udelay(100);    /* Delay after power state change */
1032
1033                 /* Switch out of Vaux if it is not a LOM */
1034                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1035                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1036                         udelay(100);
1037                 }
1038
1039                 return 0;
1040
1041         case 1:
1042                 power_control |= 1;
1043                 break;
1044
1045         case 2:
1046                 power_control |= 2;
1047                 break;
1048
1049         case 3:
1050                 power_control |= 3;
1051                 break;
1052
1053         default:
1054                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1055                        "requested.\n",
1056                        tp->dev->name, state);
1057                 return -EINVAL;
1058         };
1059
1060         power_control |= PCI_PM_CTRL_PME_ENABLE;
1061
1062         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1063         tw32(TG3PCI_MISC_HOST_CTRL,
1064              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1065
1066         if (tp->link_config.phy_is_low_power == 0) {
1067                 tp->link_config.phy_is_low_power = 1;
1068                 tp->link_config.orig_speed = tp->link_config.speed;
1069                 tp->link_config.orig_duplex = tp->link_config.duplex;
1070                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1071         }
1072
1073         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1074                 tp->link_config.speed = SPEED_10;
1075                 tp->link_config.duplex = DUPLEX_HALF;
1076                 tp->link_config.autoneg = AUTONEG_ENABLE;
1077                 tg3_setup_phy(tp, 0);
1078         }
1079
1080         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1081
1082         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1083                 u32 mac_mode;
1084
1085                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1086                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1087                         udelay(40);
1088
1089                         mac_mode = MAC_MODE_PORT_MODE_MII;
1090
1091                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1092                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1093                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1094                 } else {
1095                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1096                 }
1097
1098                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1099                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1100
1101                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1102                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1103                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1104
1105                 tw32_f(MAC_MODE, mac_mode);
1106                 udelay(100);
1107
1108                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1109                 udelay(10);
1110         }
1111
1112         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1113             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1114              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1115                 u32 base_val;
1116
1117                 base_val = tp->pci_clock_ctrl;
1118                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1119                              CLOCK_CTRL_TXCLK_DISABLE);
1120
1121                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1122                      CLOCK_CTRL_ALTCLK |
1123                      CLOCK_CTRL_PWRDOWN_PLL133);
1124                 udelay(40);
1125         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1126                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1127                 u32 newbits1, newbits2;
1128
1129                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1132                                     CLOCK_CTRL_TXCLK_DISABLE |
1133                                     CLOCK_CTRL_ALTCLK);
1134                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1135                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1136                         newbits1 = CLOCK_CTRL_625_CORE;
1137                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1138                 } else {
1139                         newbits1 = CLOCK_CTRL_ALTCLK;
1140                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1141                 }
1142
1143                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1144                 udelay(40);
1145
1146                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1147                 udelay(40);
1148
1149                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1150                         u32 newbits3;
1151
1152                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1153                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1154                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1155                                             CLOCK_CTRL_TXCLK_DISABLE |
1156                                             CLOCK_CTRL_44MHZ_CORE);
1157                         } else {
1158                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1159                         }
1160
1161                         tw32_f(TG3PCI_CLOCK_CTRL,
1162                                          tp->pci_clock_ctrl | newbits3);
1163                         udelay(40);
1164                 }
1165         }
1166
1167         tg3_frob_aux_power(tp);
1168
1169         /* Workaround for unstable PLL clock */
1170         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1171             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1172                 u32 val = tr32(0x7d00);
1173
1174                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1175                 tw32(0x7d00, val);
1176                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1177                         tg3_halt_cpu(tp, RX_CPU_BASE);
1178         }
1179
1180         /* Finally, set the new power state. */
1181         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1182         udelay(100);    /* Delay after power state change */
1183
1184         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1185
1186         return 0;
1187 }
1188
1189 static void tg3_link_report(struct tg3 *tp)
1190 {
1191         if (!netif_carrier_ok(tp->dev)) {
1192                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1193         } else {
1194                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1195                        tp->dev->name,
1196                        (tp->link_config.active_speed == SPEED_1000 ?
1197                         1000 :
1198                         (tp->link_config.active_speed == SPEED_100 ?
1199                          100 : 10)),
1200                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1201                         "full" : "half"));
1202
1203                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1204                        "%s for RX.\n",
1205                        tp->dev->name,
1206                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1207                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1208         }
1209 }
1210
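/* tg3_setup_flow_control
 *  resolve the TX/RX pause configuration from the local and remote
 *  autoneg advertisements (or keep the manually configured flags) and
 *  update MAC_RX_MODE / MAC_TX_MODE if the result changed
 */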
1211 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1212 {
1213         u32 new_tg3_flags = 0;
1214         u32 old_rx_mode = tp->rx_mode;
1215         u32 old_tx_mode = tp->tx_mode;
1216
1217         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1218                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1219                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1220                                 if (remote_adv & LPA_PAUSE_CAP)
1221                                         new_tg3_flags |=
1222                                                 (TG3_FLAG_RX_PAUSE |
1223                                                 TG3_FLAG_TX_PAUSE);
1224                                 else if (remote_adv & LPA_PAUSE_ASYM)
1225                                         new_tg3_flags |=
1226                                                 (TG3_FLAG_RX_PAUSE);
1227                         } else {
1228                                 if (remote_adv & LPA_PAUSE_CAP)
1229                                         new_tg3_flags |=
1230                                                 (TG3_FLAG_RX_PAUSE |
1231                                                 TG3_FLAG_TX_PAUSE);
1232                         }
1233                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1234                         if ((remote_adv & LPA_PAUSE_CAP) &&
1235                         (remote_adv & LPA_PAUSE_ASYM))
1236                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1237                 }
1238
1239                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1240                 tp->tg3_flags |= new_tg3_flags;
1241         } else {
1242                 new_tg3_flags = tp->tg3_flags;
1243         }
1244
1245         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1246                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1247         else
1248                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1249
1250         if (old_rx_mode != tp->rx_mode) {
1251                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1252         }
1253         
1254         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1255                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1256         else
1257                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1258
1259         if (old_tx_mode != tp->tx_mode) {
1260                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1261         }
1262 }
1263
1264 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1265 {
1266         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1267         case MII_TG3_AUX_STAT_10HALF:
1268                 *speed = SPEED_10;
1269                 *duplex = DUPLEX_HALF;
1270                 break;
1271
1272         case MII_TG3_AUX_STAT_10FULL:
1273                 *speed = SPEED_10;
1274                 *duplex = DUPLEX_FULL;
1275                 break;
1276
1277         case MII_TG3_AUX_STAT_100HALF:
1278                 *speed = SPEED_100;
1279                 *duplex = DUPLEX_HALF;
1280                 break;
1281
1282         case MII_TG3_AUX_STAT_100FULL:
1283                 *speed = SPEED_100;
1284                 *duplex = DUPLEX_FULL;
1285                 break;
1286
1287         case MII_TG3_AUX_STAT_1000HALF:
1288                 *speed = SPEED_1000;
1289                 *duplex = DUPLEX_HALF;
1290                 break;
1291
1292         case MII_TG3_AUX_STAT_1000FULL:
1293                 *speed = SPEED_1000;
1294                 *duplex = DUPLEX_FULL;
1295                 break;
1296
1297         default:
1298                 *speed = SPEED_INVALID;
1299                 *duplex = DUPLEX_INVALID;
1300                 break;
1301         };
1302 }
1303
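/* tg3_phy_copper_begin
 *  program the copper PHY advertisement registers from link_config,
 *  then either force the requested speed/duplex or restart
 *  autonegotiation
 */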
1304 static void tg3_phy_copper_begin(struct tg3 *tp)
1305 {
1306         u32 new_adv;
1307         int i;
1308
1309         if (tp->link_config.phy_is_low_power) {
1310                 /* Entering low power mode.  Disable gigabit and
1311                  * 100baseT advertisements.
1312                  */
1313                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1314
1315                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1316                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1317                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1318                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1319
1320                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1321         } else if (tp->link_config.speed == SPEED_INVALID) {
1322                 tp->link_config.advertising =
1323                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1324                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1325                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1326                          ADVERTISED_Autoneg | ADVERTISED_MII);
1327
1328                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1329                         tp->link_config.advertising &=
1330                                 ~(ADVERTISED_1000baseT_Half |
1331                                   ADVERTISED_1000baseT_Full);
1332
1333                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1334                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1335                         new_adv |= ADVERTISE_10HALF;
1336                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1337                         new_adv |= ADVERTISE_10FULL;
1338                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1339                         new_adv |= ADVERTISE_100HALF;
1340                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1341                         new_adv |= ADVERTISE_100FULL;
1342                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1343
1344                 if (tp->link_config.advertising &
1345                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1346                         new_adv = 0;
1347                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1348                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1350                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1351                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1352                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1353                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1354                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1355                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1356                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1357                 } else {
1358                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1359                 }
1360         } else {
1361                 /* Asking for a specific link mode. */
1362                 if (tp->link_config.speed == SPEED_1000) {
1363                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1364                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1365
1366                         if (tp->link_config.duplex == DUPLEX_FULL)
1367                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1368                         else
1369                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1370                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1371                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1372                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1373                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1374                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1375                 } else {
1376                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1377
1378                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1379                         if (tp->link_config.speed == SPEED_100) {
1380                                 if (tp->link_config.duplex == DUPLEX_FULL)
1381                                         new_adv |= ADVERTISE_100FULL;
1382                                 else
1383                                         new_adv |= ADVERTISE_100HALF;
1384                         } else {
1385                                 if (tp->link_config.duplex == DUPLEX_FULL)
1386                                         new_adv |= ADVERTISE_10FULL;
1387                                 else
1388                                         new_adv |= ADVERTISE_10HALF;
1389                         }
1390                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1391                 }
1392         }
1393
1394         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1395             tp->link_config.speed != SPEED_INVALID) {
1396                 u32 bmcr, orig_bmcr;
1397
1398                 tp->link_config.active_speed = tp->link_config.speed;
1399                 tp->link_config.active_duplex = tp->link_config.duplex;
1400
1401                 bmcr = 0;
1402                 switch (tp->link_config.speed) {
1403                 default:
1404                 case SPEED_10:
1405                         break;
1406
1407                 case SPEED_100:
1408                         bmcr |= BMCR_SPEED100;
1409                         break;
1410
1411                 case SPEED_1000:
1412                         bmcr |= TG3_BMCR_SPEED1000;
1413                         break;
1414                 };
1415
1416                 if (tp->link_config.duplex == DUPLEX_FULL)
1417                         bmcr |= BMCR_FULLDPLX;
1418
1419                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1420                     (bmcr != orig_bmcr)) {
1421                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1422                         for (i = 0; i < 1500; i++) {
1423                                 u32 tmp;
1424
1425                                 udelay(10);
1426                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1427                                     tg3_readphy(tp, MII_BMSR, &tmp))
1428                                         continue;
1429                                 if (!(tmp & BMSR_LSTATUS)) {
1430                                         udelay(40);
1431                                         break;
1432                                 }
1433                         }
1434                         tg3_writephy(tp, MII_BMCR, bmcr);
1435                         udelay(40);
1436                 }
1437         } else {
1438                 tg3_writephy(tp, MII_BMCR,
1439                              BMCR_ANENABLE | BMCR_ANRESTART);
1440         }
1441 }
1442
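/* The 5401 PHY's DSP registers are not mapped directly into MII space.
 * Each pair of writes below appears to select a DSP register by writing its
 * address to MII_TG3_DSP_ADDRESS and then the data word to
 * MII_TG3_DSP_RW_PORT; the specific values are Broadcom-supplied magic.
 */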
1443 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1444 {
1445         int err;
1446
1447         /* Turn off tap power management. */
1448         /* Set Extended packet length bit */
1449         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1450
1451         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1452         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1453
1454         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1455         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1456
1457         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1458         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1459
1460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1462
1463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1465
1466         udelay(40);
1467
1468         return err;
1469 }
1470
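/* Returns nonzero only if the PHY is currently advertising every speed and
 * duplex we support (10/100 always, plus 1000 unless the chip is 10/100
 * only).  Used by tg3_setup_copper_phy() to decide whether autonegotiation
 * must be restarted (e.g. when exiting low power mode).
 */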
1471 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1472 {
1473         u32 adv_reg, all_mask;
1474
1475         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1476                 return 0;
1477
1478         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1479                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1480         if ((adv_reg & all_mask) != all_mask)
1481                 return 0;
1482         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1483                 u32 tg3_ctrl;
1484
1485                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1486                         return 0;
1487
1488                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1489                             MII_TG3_CTRL_ADV_1000_FULL);
1490                 if ((tg3_ctrl & all_mask) != all_mask)
1491                         return 0;
1492         }
1493         return 1;
1494 }
1495
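/* Note on the back-to-back MII_BMSR reads in this function: the link-status
 * bit in BMSR is latched low by the PHY, so the first read returns (and
 * clears) the latched value and the second read reflects the current link
 * state.
 */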
1496 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1497 {
1498         int current_link_up;
1499         u32 bmsr, dummy;
1500         u16 current_speed;
1501         u8 current_duplex;
1502         int i, err;
1503
1504         tw32(MAC_EVENT, 0);
1505
1506         tw32_f(MAC_STATUS,
1507              (MAC_STATUS_SYNC_CHANGED |
1508               MAC_STATUS_CFG_CHANGED |
1509               MAC_STATUS_MI_COMPLETION |
1510               MAC_STATUS_LNKSTATE_CHANGED));
1511         udelay(40);
1512
1513         tp->mi_mode = MAC_MI_MODE_BASE;
1514         tw32_f(MAC_MI_MODE, tp->mi_mode);
1515         udelay(80);
1516
1517         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1518
1519         /* Some third-party PHYs need to be reset on link going
1520          * down.
1521          */
1522         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1523              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1524              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1525             netif_carrier_ok(tp->dev)) {
1526                 tg3_readphy(tp, MII_BMSR, &bmsr);
1527                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1528                     !(bmsr & BMSR_LSTATUS))
1529                         force_reset = 1;
1530         }
1531         if (force_reset)
1532                 tg3_phy_reset(tp);
1533
1534         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1535                 tg3_readphy(tp, MII_BMSR, &bmsr);
1536                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1537                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1538                         bmsr = 0;
1539
1540                 if (!(bmsr & BMSR_LSTATUS)) {
1541                         err = tg3_init_5401phy_dsp(tp);
1542                         if (err)
1543                                 return err;
1544
1545                         tg3_readphy(tp, MII_BMSR, &bmsr);
1546                         for (i = 0; i < 1000; i++) {
1547                                 udelay(10);
1548                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1549                                     (bmsr & BMSR_LSTATUS)) {
1550                                         udelay(40);
1551                                         break;
1552                                 }
1553                         }
1554
1555                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1556                             !(bmsr & BMSR_LSTATUS) &&
1557                             tp->link_config.active_speed == SPEED_1000) {
1558                                 err = tg3_phy_reset(tp);
1559                                 if (!err)
1560                                         err = tg3_init_5401phy_dsp(tp);
1561                                 if (err)
1562                                         return err;
1563                         }
1564                 }
1565         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1566                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1567                 /* 5701 {A0,B0} CRC bug workaround */
1568                 tg3_writephy(tp, 0x15, 0x0a75);
1569                 tg3_writephy(tp, 0x1c, 0x8c68);
1570                 tg3_writephy(tp, 0x1c, 0x8d68);
1571                 tg3_writephy(tp, 0x1c, 0x8c68);
1572         }
1573
1574         /* Clear pending interrupts... */
1575         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1576         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1577
1578         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1579                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1580         else
1581                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1582
1583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1584             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1585                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1586                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1587                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1588                 else
1589                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1590         }
1591
1592         current_link_up = 0;
1593         current_speed = SPEED_INVALID;
1594         current_duplex = DUPLEX_INVALID;
1595
1596         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1597                 u32 val;
1598
1599                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1600                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1601                 if (!(val & (1 << 10))) {
1602                         val |= (1 << 10);
1603                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1604                         goto relink;
1605                 }
1606         }
1607
1608         bmsr = 0;
1609         for (i = 0; i < 100; i++) {
1610                 tg3_readphy(tp, MII_BMSR, &bmsr);
1611                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1612                     (bmsr & BMSR_LSTATUS))
1613                         break;
1614                 udelay(40);
1615         }
1616
1617         if (bmsr & BMSR_LSTATUS) {
1618                 u32 aux_stat, bmcr;
1619
1620                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1621                 for (i = 0; i < 2000; i++) {
1622                         udelay(10);
1623                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1624                             aux_stat)
1625                                 break;
1626                 }
1627
1628                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1629                                              &current_speed,
1630                                              &current_duplex);
1631
1632                 bmcr = 0;
1633                 for (i = 0; i < 200; i++) {
1634                         tg3_readphy(tp, MII_BMCR, &bmcr);
1635                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1636                                 continue;
1637                         if (bmcr && bmcr != 0x7fff)
1638                                 break;
1639                         udelay(10);
1640                 }
1641
1642                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1643                         if (bmcr & BMCR_ANENABLE) {
1644                                 current_link_up = 1;
1645
1646                                 /* Force autoneg restart if we are exiting
1647                                  * low power mode.
1648                                  */
1649                                 if (!tg3_copper_is_advertising_all(tp))
1650                                         current_link_up = 0;
1651                         } else {
1652                                 current_link_up = 0;
1653                         }
1654                 } else {
1655                         if (!(bmcr & BMCR_ANENABLE) &&
1656                             tp->link_config.speed == current_speed &&
1657                             tp->link_config.duplex == current_duplex) {
1658                                 current_link_up = 1;
1659                         } else {
1660                                 current_link_up = 0;
1661                         }
1662                 }
1663
1664                 tp->link_config.active_speed = current_speed;
1665                 tp->link_config.active_duplex = current_duplex;
1666         }
1667
1668         if (current_link_up == 1 &&
1669             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1670             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1671                 u32 local_adv, remote_adv;
1672
1673                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1674                         local_adv = 0;
1675                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1676
1677                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1678                         remote_adv = 0;
1679
1680                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1681
1682                 /* If we are not advertising full pause capability,
1683                  * something is wrong.  Bring the link down and reconfigure.
1684                  */
1685                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1686                         current_link_up = 0;
1687                 } else {
1688                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1689                 }
1690         }
1691 relink:
1692         if (current_link_up == 0) {
1693                 u32 tmp;
1694
1695                 tg3_phy_copper_begin(tp);
1696
1697                 tg3_readphy(tp, MII_BMSR, &tmp);
1698                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1699                     (tmp & BMSR_LSTATUS))
1700                         current_link_up = 1;
1701         }
1702
1703         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1704         if (current_link_up == 1) {
1705                 if (tp->link_config.active_speed == SPEED_100 ||
1706                     tp->link_config.active_speed == SPEED_10)
1707                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1708                 else
1709                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1710         } else
1711                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1712
1713         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1714         if (tp->link_config.active_duplex == DUPLEX_HALF)
1715                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1716
1717         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1719                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1720                     (current_link_up == 1 &&
1721                      tp->link_config.active_speed == SPEED_10))
1722                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1723         } else {
1724                 if (current_link_up == 1)
1725                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1726         }
1727
1728         /* ??? Without this setting Netgear GA302T PHY does not
1729          * ??? send/receive packets...
1730          */
1731         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1732             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1733                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1734                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1735                 udelay(80);
1736         }
1737
1738         tw32_f(MAC_MODE, tp->mac_mode);
1739         udelay(40);
1740
1741         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1742                 /* Polled via timer. */
1743                 tw32_f(MAC_EVENT, 0);
1744         } else {
1745                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1746         }
1747         udelay(40);
1748
1749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1750             current_link_up == 1 &&
1751             tp->link_config.active_speed == SPEED_1000 &&
1752             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1753              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1754                 udelay(120);
1755                 tw32_f(MAC_STATUS,
1756                      (MAC_STATUS_SYNC_CHANGED |
1757                       MAC_STATUS_CFG_CHANGED));
1758                 udelay(40);
1759                 tg3_write_mem(tp,
1760                               NIC_SRAM_FIRMWARE_MBOX,
1761                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1762         }
1763
1764         if (current_link_up != netif_carrier_ok(tp->dev)) {
1765                 if (current_link_up)
1766                         netif_carrier_on(tp->dev);
1767                 else
1768                         netif_carrier_off(tp->dev);
1769                 tg3_link_report(tp);
1770         }
1771
1772         return 0;
1773 }
1774
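/* State for the software 1000BASE-X autonegotiation machine used on fiber
 * boards without (or instead of) the hardware autoneg block.  The states and
 * MR_* flags appear to follow the IEEE 802.3 clause 37 arbitration process;
 * the correspondence is inferred from the names, not from chip documentation.
 */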
1775 struct tg3_fiber_aneginfo {
1776         int state;
1777 #define ANEG_STATE_UNKNOWN              0
1778 #define ANEG_STATE_AN_ENABLE            1
1779 #define ANEG_STATE_RESTART_INIT         2
1780 #define ANEG_STATE_RESTART              3
1781 #define ANEG_STATE_DISABLE_LINK_OK      4
1782 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1783 #define ANEG_STATE_ABILITY_DETECT       6
1784 #define ANEG_STATE_ACK_DETECT_INIT      7
1785 #define ANEG_STATE_ACK_DETECT           8
1786 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1787 #define ANEG_STATE_COMPLETE_ACK         10
1788 #define ANEG_STATE_IDLE_DETECT_INIT     11
1789 #define ANEG_STATE_IDLE_DETECT          12
1790 #define ANEG_STATE_LINK_OK              13
1791 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1792 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1793
1794         u32 flags;
1795 #define MR_AN_ENABLE            0x00000001
1796 #define MR_RESTART_AN           0x00000002
1797 #define MR_AN_COMPLETE          0x00000004
1798 #define MR_PAGE_RX              0x00000008
1799 #define MR_NP_LOADED            0x00000010
1800 #define MR_TOGGLE_TX            0x00000020
1801 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1802 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1803 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1804 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1805 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1806 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1807 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1808 #define MR_TOGGLE_RX            0x00002000
1809 #define MR_NP_RX                0x00004000
1810
1811 #define MR_LINK_OK              0x80000000
1812
1813         unsigned long link_time, cur_time;
1814
1815         u32 ability_match_cfg;
1816         int ability_match_count;
1817
1818         char ability_match, idle_match, ack_match;
1819
1820         u32 txconfig, rxconfig;
1821 #define ANEG_CFG_NP             0x00000080
1822 #define ANEG_CFG_ACK            0x00000040
1823 #define ANEG_CFG_RF2            0x00000020
1824 #define ANEG_CFG_RF1            0x00000010
1825 #define ANEG_CFG_PS2            0x00000001
1826 #define ANEG_CFG_PS1            0x00008000
1827 #define ANEG_CFG_HD             0x00004000
1828 #define ANEG_CFG_FD             0x00002000
1829 #define ANEG_CFG_INVAL          0x00001f06
1830
1831 };
1832 #define ANEG_OK         0
1833 #define ANEG_DONE       1
1834 #define ANEG_TIMER_ENAB 2
1835 #define ANEG_FAILED     -1
1836
1837 #define ANEG_STATE_SETTLE_TIME  10000
1838
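/* ap->cur_time advances by one on each call to this state machine, and
 * fiber_autoneg() below spaces calls roughly 1us apart, so a settle time of
 * 10000 ticks is on the order of 10ms (longer in practice because of
 * register access overhead per iteration).
 */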
1839 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1840                                    struct tg3_fiber_aneginfo *ap)
1841 {
1842         unsigned long delta;
1843         u32 rx_cfg_reg;
1844         int ret;
1845
1846         if (ap->state == ANEG_STATE_UNKNOWN) {
1847                 ap->rxconfig = 0;
1848                 ap->link_time = 0;
1849                 ap->cur_time = 0;
1850                 ap->ability_match_cfg = 0;
1851                 ap->ability_match_count = 0;
1852                 ap->ability_match = 0;
1853                 ap->idle_match = 0;
1854                 ap->ack_match = 0;
1855         }
1856         ap->cur_time++;
1857
1858         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1859                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1860
1861                 if (rx_cfg_reg != ap->ability_match_cfg) {
1862                         ap->ability_match_cfg = rx_cfg_reg;
1863                         ap->ability_match = 0;
1864                         ap->ability_match_count = 0;
1865                 } else {
1866                         if (++ap->ability_match_count > 1) {
1867                                 ap->ability_match = 1;
1868                                 ap->ability_match_cfg = rx_cfg_reg;
1869                         }
1870                 }
1871                 if (rx_cfg_reg & ANEG_CFG_ACK)
1872                         ap->ack_match = 1;
1873                 else
1874                         ap->ack_match = 0;
1875
1876                 ap->idle_match = 0;
1877         } else {
1878                 ap->idle_match = 1;
1879                 ap->ability_match_cfg = 0;
1880                 ap->ability_match_count = 0;
1881                 ap->ability_match = 0;
1882                 ap->ack_match = 0;
1883
1884                 rx_cfg_reg = 0;
1885         }
1886
1887         ap->rxconfig = rx_cfg_reg;
1888         ret = ANEG_OK;
1889
1890         switch(ap->state) {
1891         case ANEG_STATE_UNKNOWN:
1892                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1893                         ap->state = ANEG_STATE_AN_ENABLE;
1894
1895                 /* fallthru */
1896         case ANEG_STATE_AN_ENABLE:
1897                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1898                 if (ap->flags & MR_AN_ENABLE) {
1899                         ap->link_time = 0;
1900                         ap->cur_time = 0;
1901                         ap->ability_match_cfg = 0;
1902                         ap->ability_match_count = 0;
1903                         ap->ability_match = 0;
1904                         ap->idle_match = 0;
1905                         ap->ack_match = 0;
1906
1907                         ap->state = ANEG_STATE_RESTART_INIT;
1908                 } else {
1909                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1910                 }
1911                 break;
1912
1913         case ANEG_STATE_RESTART_INIT:
1914                 ap->link_time = ap->cur_time;
1915                 ap->flags &= ~(MR_NP_LOADED);
1916                 ap->txconfig = 0;
1917                 tw32(MAC_TX_AUTO_NEG, 0);
1918                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1919                 tw32_f(MAC_MODE, tp->mac_mode);
1920                 udelay(40);
1921
1922                 ret = ANEG_TIMER_ENAB;
1923                 ap->state = ANEG_STATE_RESTART;
1924
1925                 /* fallthru */
1926         case ANEG_STATE_RESTART:
1927                 delta = ap->cur_time - ap->link_time;
1928                 if (delta > ANEG_STATE_SETTLE_TIME) {
1929                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1930                 } else {
1931                         ret = ANEG_TIMER_ENAB;
1932                 }
1933                 break;
1934
1935         case ANEG_STATE_DISABLE_LINK_OK:
1936                 ret = ANEG_DONE;
1937                 break;
1938
1939         case ANEG_STATE_ABILITY_DETECT_INIT:
1940                 ap->flags &= ~(MR_TOGGLE_TX);
1941                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1942                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1943                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1944                 tw32_f(MAC_MODE, tp->mac_mode);
1945                 udelay(40);
1946
1947                 ap->state = ANEG_STATE_ABILITY_DETECT;
1948                 break;
1949
1950         case ANEG_STATE_ABILITY_DETECT:
1951                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1952                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1953                 }
1954                 break;
1955
1956         case ANEG_STATE_ACK_DETECT_INIT:
1957                 ap->txconfig |= ANEG_CFG_ACK;
1958                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1959                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1960                 tw32_f(MAC_MODE, tp->mac_mode);
1961                 udelay(40);
1962
1963                 ap->state = ANEG_STATE_ACK_DETECT;
1964
1965                 /* fallthru */
1966         case ANEG_STATE_ACK_DETECT:
1967                 if (ap->ack_match != 0) {
1968                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1969                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1970                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1971                         } else {
1972                                 ap->state = ANEG_STATE_AN_ENABLE;
1973                         }
1974                 } else if (ap->ability_match != 0 &&
1975                            ap->rxconfig == 0) {
1976                         ap->state = ANEG_STATE_AN_ENABLE;
1977                 }
1978                 break;
1979
1980         case ANEG_STATE_COMPLETE_ACK_INIT:
1981                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1982                         ret = ANEG_FAILED;
1983                         break;
1984                 }
1985                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1986                                MR_LP_ADV_HALF_DUPLEX |
1987                                MR_LP_ADV_SYM_PAUSE |
1988                                MR_LP_ADV_ASYM_PAUSE |
1989                                MR_LP_ADV_REMOTE_FAULT1 |
1990                                MR_LP_ADV_REMOTE_FAULT2 |
1991                                MR_LP_ADV_NEXT_PAGE |
1992                                MR_TOGGLE_RX |
1993                                MR_NP_RX);
1994                 if (ap->rxconfig & ANEG_CFG_FD)
1995                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1996                 if (ap->rxconfig & ANEG_CFG_HD)
1997                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1998                 if (ap->rxconfig & ANEG_CFG_PS1)
1999                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2000                 if (ap->rxconfig & ANEG_CFG_PS2)
2001                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2002                 if (ap->rxconfig & ANEG_CFG_RF1)
2003                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2004                 if (ap->rxconfig & ANEG_CFG_RF2)
2005                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2006                 if (ap->rxconfig & ANEG_CFG_NP)
2007                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2008
2009                 ap->link_time = ap->cur_time;
2010
2011                 ap->flags ^= (MR_TOGGLE_TX);
2012                 if (ap->rxconfig & 0x0008)
2013                         ap->flags |= MR_TOGGLE_RX;
2014                 if (ap->rxconfig & ANEG_CFG_NP)
2015                         ap->flags |= MR_NP_RX;
2016                 ap->flags |= MR_PAGE_RX;
2017
2018                 ap->state = ANEG_STATE_COMPLETE_ACK;
2019                 ret = ANEG_TIMER_ENAB;
2020                 break;
2021
2022         case ANEG_STATE_COMPLETE_ACK:
2023                 if (ap->ability_match != 0 &&
2024                     ap->rxconfig == 0) {
2025                         ap->state = ANEG_STATE_AN_ENABLE;
2026                         break;
2027                 }
2028                 delta = ap->cur_time - ap->link_time;
2029                 if (delta > ANEG_STATE_SETTLE_TIME) {
2030                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2031                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2032                         } else {
2033                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2034                                     !(ap->flags & MR_NP_RX)) {
2035                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2036                                 } else {
2037                                         ret = ANEG_FAILED;
2038                                 }
2039                         }
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_IDLE_DETECT_INIT:
2044                 ap->link_time = ap->cur_time;
2045                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2046                 tw32_f(MAC_MODE, tp->mac_mode);
2047                 udelay(40);
2048
2049                 ap->state = ANEG_STATE_IDLE_DETECT;
2050                 ret = ANEG_TIMER_ENAB;
2051                 break;
2052
2053         case ANEG_STATE_IDLE_DETECT:
2054                 if (ap->ability_match != 0 &&
2055                     ap->rxconfig == 0) {
2056                         ap->state = ANEG_STATE_AN_ENABLE;
2057                         break;
2058                 }
2059                 delta = ap->cur_time - ap->link_time;
2060                 if (delta > ANEG_STATE_SETTLE_TIME) {
2061                         /* XXX another gem from the Broadcom driver :( */
2062                         ap->state = ANEG_STATE_LINK_OK;
2063                 }
2064                 break;
2065
2066         case ANEG_STATE_LINK_OK:
2067                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2068                 ret = ANEG_DONE;
2069                 break;
2070
2071         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2072                 /* ??? unimplemented */
2073                 break;
2074
2075         case ANEG_STATE_NEXT_PAGE_WAIT:
2076                 /* ??? unimplemented */
2077                 break;
2078
2079         default:
2080                 ret = ANEG_FAILED;
2081                 break;
2082         };
2083
2084         return ret;
2085 }
2086
2087 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2088 {
2089         int res = 0;
2090         struct tg3_fiber_aneginfo aninfo;
2091         int status = ANEG_FAILED;
2092         unsigned int tick;
2093         u32 tmp;
2094
2095         tw32_f(MAC_TX_AUTO_NEG, 0);
2096
2097         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2098         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2099         udelay(40);
2100
2101         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2102         udelay(40);
2103
2104         memset(&aninfo, 0, sizeof(aninfo));
2105         aninfo.flags |= MR_AN_ENABLE;
2106         aninfo.state = ANEG_STATE_UNKNOWN;
2107         aninfo.cur_time = 0;
2108         tick = 0;
2109         while (++tick < 195000) {
2110                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2111                 if (status == ANEG_DONE || status == ANEG_FAILED)
2112                         break;
2113
2114                 udelay(1);
2115         }
2116
2117         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2118         tw32_f(MAC_MODE, tp->mac_mode);
2119         udelay(40);
2120
2121         *flags = aninfo.flags;
2122
2123         if (status == ANEG_DONE &&
2124             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2125                              MR_LP_ADV_FULL_DUPLEX)))
2126                 res = 1;
2127
2128         return res;
2129 }
2130
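/* Bring-up sequence for the BCM8002 SERDES PHY.  The raw register numbers
 * (0x10, 0x11, 0x13, 0x16, 0x18) and the values written here are
 * Broadcom-supplied and have no named definitions in the MII headers; the
 * per-write comments below are the only description available.
 */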
2131 static void tg3_init_bcm8002(struct tg3 *tp)
2132 {
2133         u32 mac_status = tr32(MAC_STATUS);
2134         int i;
2135
2136         /* Reset when initializing for the first time or when we have a link. */
2137         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2138             !(mac_status & MAC_STATUS_PCS_SYNCED))
2139                 return;
2140
2141         /* Set PLL lock range. */
2142         tg3_writephy(tp, 0x16, 0x8007);
2143
2144         /* SW reset */
2145         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2146
2147         /* Wait for reset to complete. */
2148         /* XXX schedule_timeout() ... */
2149         for (i = 0; i < 500; i++)
2150                 udelay(10);
2151
2152         /* Config mode; select PMA/Ch 1 regs. */
2153         tg3_writephy(tp, 0x10, 0x8411);
2154
2155         /* Enable auto-lock and comdet, select txclk for tx. */
2156         tg3_writephy(tp, 0x11, 0x0a10);
2157
2158         tg3_writephy(tp, 0x18, 0x00a0);
2159         tg3_writephy(tp, 0x16, 0x41ff);
2160
2161         /* Assert and deassert POR. */
2162         tg3_writephy(tp, 0x13, 0x0400);
2163         udelay(40);
2164         tg3_writephy(tp, 0x13, 0x0000);
2165
2166         tg3_writephy(tp, 0x11, 0x0a50);
2167         udelay(40);
2168         tg3_writephy(tp, 0x11, 0x0a10);
2169
2170         /* Wait for signal to stabilize */
2171         /* XXX schedule_timeout() ... */
2172         for (i = 0; i < 15000; i++)
2173                 udelay(10);
2174
2175         /* Deselect the channel register so we can read the PHYID
2176          * later.
2177          */
2178         tg3_writephy(tp, 0x10, 0x8011);
2179 }
2180
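/* Fiber link setup when the SERDES hardware autonegotiation block is in use
 * (TG3_FLG2_HW_AUTONEG).  Negotiation is driven through SG_DIG_CTRL and
 * polled via SG_DIG_STATUS; the 0x01388400/0x81388400 control words and the
 * serdes_cfg bits are Broadcom-provided magic values.
 */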
2181 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2182 {
2183         u32 sg_dig_ctrl, sg_dig_status;
2184         u32 serdes_cfg, expected_sg_dig_ctrl;
2185         int workaround, port_a;
2186         int current_link_up;
2187
2188         serdes_cfg = 0;
2189         expected_sg_dig_ctrl = 0;
2190         workaround = 0;
2191         port_a = 1;
2192         current_link_up = 0;
2193
2194         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2195             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2196                 workaround = 1;
2197                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2198                         port_a = 0;
2199
2200                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2201                 /* preserve bits 20-23 for voltage regulator */
2202                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2203         }
2204
2205         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2206
2207         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2208                 if (sg_dig_ctrl & (1 << 31)) {
2209                         if (workaround) {
2210                                 u32 val = serdes_cfg;
2211
2212                                 if (port_a)
2213                                         val |= 0xc010000;
2214                                 else
2215                                         val |= 0x4010000;
2216                                 tw32_f(MAC_SERDES_CFG, val);
2217                         }
2218                         tw32_f(SG_DIG_CTRL, 0x01388400);
2219                 }
2220                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2221                         tg3_setup_flow_control(tp, 0, 0);
2222                         current_link_up = 1;
2223                 }
2224                 goto out;
2225         }
2226
2227         /* Want auto-negotiation.  */
2228         expected_sg_dig_ctrl = 0x81388400;
2229
2230         /* Pause capability */
2231         expected_sg_dig_ctrl |= (1 << 11);
2232
2233         /* Asymmetric pause */
2234         expected_sg_dig_ctrl |= (1 << 12);
2235
2236         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2237                 if (workaround)
2238                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2239                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2240                 udelay(5);
2241                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2242
2243                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2244         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2245                                  MAC_STATUS_SIGNAL_DET)) {
2246                 int i;
2247
2248                 /* Give it time to negotiate (~200ms) */
2249                 for (i = 0; i < 40000; i++) {
2250                         sg_dig_status = tr32(SG_DIG_STATUS);
2251                         if (sg_dig_status & (0x3))
2252                                 break;
2253                         udelay(5);
2254                 }
2255                 mac_status = tr32(MAC_STATUS);
2256
2257                 if ((sg_dig_status & (1 << 1)) &&
2258                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2259                         u32 local_adv, remote_adv;
2260
2261                         local_adv = ADVERTISE_PAUSE_CAP;
2262                         remote_adv = 0;
2263                         if (sg_dig_status & (1 << 19))
2264                                 remote_adv |= LPA_PAUSE_CAP;
2265                         if (sg_dig_status & (1 << 20))
2266                                 remote_adv |= LPA_PAUSE_ASYM;
2267
2268                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2269                         current_link_up = 1;
2270                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2271                 } else if (!(sg_dig_status & (1 << 1))) {
2272                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2273                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2274                         else {
2275                                 if (workaround) {
2276                                         u32 val = serdes_cfg;
2277
2278                                         if (port_a)
2279                                                 val |= 0xc010000;
2280                                         else
2281                                                 val |= 0x4010000;
2282
2283                                         tw32_f(MAC_SERDES_CFG, val);
2284                                 }
2285
2286                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2287                                 udelay(40);
2288
2289                                 /* Link parallel detection: link is up only
2290                                  * if we have PCS_SYNC and are not
2291                                  * receiving config code words. */
2292                                 mac_status = tr32(MAC_STATUS);
2293                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2294                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2295                                         tg3_setup_flow_control(tp, 0, 0);
2296                                         current_link_up = 1;
2297                                 }
2298                         }
2299                 }
2300         }
2301
2302 out:
2303         return current_link_up;
2304 }
2305
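/* Fiber link setup without the hardware autoneg block: run the software
 * autonegotiation state machine via fiber_autoneg(), and if that does not
 * bring the link up, fall back to parallel detection (PCS sync present and
 * no config code words being received).
 */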
2306 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2307 {
2308         int current_link_up = 0;
2309
2310         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2311                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2312                 goto out;
2313         }
2314
2315         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2316                 u32 flags;
2317                 int i;
2318   
2319                 if (fiber_autoneg(tp, &flags)) {
2320                         u32 local_adv, remote_adv;
2321
2322                         local_adv = ADVERTISE_PAUSE_CAP;
2323                         remote_adv = 0;
2324                         if (flags & MR_LP_ADV_SYM_PAUSE)
2325                                 remote_adv |= LPA_PAUSE_CAP;
2326                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2327                                 remote_adv |= LPA_PAUSE_ASYM;
2328
2329                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2330
2331                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2332                         current_link_up = 1;
2333                 }
2334                 for (i = 0; i < 30; i++) {
2335                         udelay(20);
2336                         tw32_f(MAC_STATUS,
2337                                (MAC_STATUS_SYNC_CHANGED |
2338                                 MAC_STATUS_CFG_CHANGED));
2339                         udelay(40);
2340                         if ((tr32(MAC_STATUS) &
2341                              (MAC_STATUS_SYNC_CHANGED |
2342                               MAC_STATUS_CFG_CHANGED)) == 0)
2343                                 break;
2344                 }
2345
2346                 mac_status = tr32(MAC_STATUS);
2347                 if (current_link_up == 0 &&
2348                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2349                     !(mac_status & MAC_STATUS_RCVD_CFG))
2350                         current_link_up = 1;
2351         } else {
2352                 /* Forcing 1000FD link up. */
2353                 current_link_up = 1;
2354                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2355
2356                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2357                 udelay(40);
2358         }
2359
2360 out:
2361         return current_link_up;
2362 }
2363
2364 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2365 {
2366         u32 orig_pause_cfg;
2367         u16 orig_active_speed;
2368         u8 orig_active_duplex;
2369         u32 mac_status;
2370         int current_link_up;
2371         int i;
2372
2373         orig_pause_cfg =
2374                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2375                                   TG3_FLAG_TX_PAUSE));
2376         orig_active_speed = tp->link_config.active_speed;
2377         orig_active_duplex = tp->link_config.active_duplex;
2378
2379         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2380             netif_carrier_ok(tp->dev) &&
2381             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2382                 mac_status = tr32(MAC_STATUS);
2383                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2384                                MAC_STATUS_SIGNAL_DET |
2385                                MAC_STATUS_CFG_CHANGED |
2386                                MAC_STATUS_RCVD_CFG);
2387                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2388                                    MAC_STATUS_SIGNAL_DET)) {
2389                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2390                                             MAC_STATUS_CFG_CHANGED));
2391                         return 0;
2392                 }
2393         }
2394
2395         tw32_f(MAC_TX_AUTO_NEG, 0);
2396
2397         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2398         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2399         tw32_f(MAC_MODE, tp->mac_mode);
2400         udelay(40);
2401
2402         if (tp->phy_id == PHY_ID_BCM8002)
2403                 tg3_init_bcm8002(tp);
2404
2405         /* Enable link change event even when serdes polling.  */
2406         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2407         udelay(40);
2408
2409         current_link_up = 0;
2410         mac_status = tr32(MAC_STATUS);
2411
2412         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2413                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2414         else
2415                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2416
2417         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2418         tw32_f(MAC_MODE, tp->mac_mode);
2419         udelay(40);
2420
2421         tp->hw_status->status =
2422                 (SD_STATUS_UPDATED |
2423                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2424
2425         for (i = 0; i < 100; i++) {
2426                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2427                                     MAC_STATUS_CFG_CHANGED));
2428                 udelay(5);
2429                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2430                                          MAC_STATUS_CFG_CHANGED)) == 0)
2431                         break;
2432         }
2433
2434         mac_status = tr32(MAC_STATUS);
2435         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2436                 current_link_up = 0;
2437                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2438                         tw32_f(MAC_MODE, (tp->mac_mode |
2439                                           MAC_MODE_SEND_CONFIGS));
2440                         udelay(1);
2441                         tw32_f(MAC_MODE, tp->mac_mode);
2442                 }
2443         }
2444
2445         if (current_link_up == 1) {
2446                 tp->link_config.active_speed = SPEED_1000;
2447                 tp->link_config.active_duplex = DUPLEX_FULL;
2448                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2449                                     LED_CTRL_LNKLED_OVERRIDE |
2450                                     LED_CTRL_1000MBPS_ON));
2451         } else {
2452                 tp->link_config.active_speed = SPEED_INVALID;
2453                 tp->link_config.active_duplex = DUPLEX_INVALID;
2454                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2455                                     LED_CTRL_LNKLED_OVERRIDE |
2456                                     LED_CTRL_TRAFFIC_OVERRIDE));
2457         }
2458
2459         if (current_link_up != netif_carrier_ok(tp->dev)) {
2460                 if (current_link_up)
2461                         netif_carrier_on(tp->dev);
2462                 else
2463                         netif_carrier_off(tp->dev);
2464                 tg3_link_report(tp);
2465         } else {
2466                 u32 now_pause_cfg =
2467                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2468                                          TG3_FLAG_TX_PAUSE);
2469                 if (orig_pause_cfg != now_pause_cfg ||
2470                     orig_active_speed != tp->link_config.active_speed ||
2471                     orig_active_duplex != tp->link_config.active_duplex)
2472                         tg3_link_report(tp);
2473         }
2474
2475         return 0;
2476 }
2477
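/* Top-level link setup: dispatch to the fiber or copper path, then program
 * the MAC transmit IPG/slot-time for the half-duplex gigabit case and, on
 * pre-5705 chips, disable statistics coalescing while the link is down.
 */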
2478 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2479 {
2480         int err;
2481
2482         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2483                 err = tg3_setup_fiber_phy(tp, force_reset);
2484         } else {
2485                 err = tg3_setup_copper_phy(tp, force_reset);
2486         }
2487
2488         if (tp->link_config.active_speed == SPEED_1000 &&
2489             tp->link_config.active_duplex == DUPLEX_HALF)
2490                 tw32(MAC_TX_LENGTHS,
2491                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2492                       (6 << TX_LENGTHS_IPG_SHIFT) |
2493                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2494         else
2495                 tw32(MAC_TX_LENGTHS,
2496                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2497                       (6 << TX_LENGTHS_IPG_SHIFT) |
2498                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2499
2500         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2501                 if (netif_carrier_ok(tp->dev)) {
2502                         tw32(HOSTCC_STAT_COAL_TICKS,
2503                              DEFAULT_STAT_COAL_TICKS);
2504                 } else {
2505                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2506                 }
2507         }
2508
2509         return err;
2510 }
2511
2512 /* Tigon3 never reports partial packet sends.  So we do not
2513  * need special logic to handle SKBs that have not had all
2514  * of their frags sent yet, like SunGEM does.
2515  */
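/* The chip advances idx[0].tx_consumer in the host status block as it
 * completes transmit descriptors; everything between tp->tx_cons and that
 * index can be unmapped and freed, after which the queue is woken if enough
 * descriptors have become available again.
 */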
2516 static void tg3_tx(struct tg3 *tp)
2517 {
2518         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2519         u32 sw_idx = tp->tx_cons;
2520
2521         while (sw_idx != hw_idx) {
2522                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2523                 struct sk_buff *skb = ri->skb;
2524                 int i;
2525
2526                 if (unlikely(skb == NULL))
2527                         BUG();
2528
2529                 pci_unmap_single(tp->pdev,
2530                                  pci_unmap_addr(ri, mapping),
2531                                  skb_headlen(skb),
2532                                  PCI_DMA_TODEVICE);
2533
2534                 ri->skb = NULL;
2535
2536                 sw_idx = NEXT_TX(sw_idx);
2537
2538                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2539                         if (unlikely(sw_idx == hw_idx))
2540                                 BUG();
2541
2542                         ri = &tp->tx_buffers[sw_idx];
2543                         if (unlikely(ri->skb != NULL))
2544                                 BUG();
2545
2546                         pci_unmap_page(tp->pdev,
2547                                        pci_unmap_addr(ri, mapping),
2548                                        skb_shinfo(skb)->frags[i].size,
2549                                        PCI_DMA_TODEVICE);
2550
2551                         sw_idx = NEXT_TX(sw_idx);
2552                 }
2553
2554                 dev_kfree_skb_irq(skb);
2555         }
2556
2557         tp->tx_cons = sw_idx;
2558
2559         if (netif_queue_stopped(tp->dev) &&
2560             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2561                 netif_wake_queue(tp->dev);
2562 }
2563
2564 /* Returns size of skb allocated or < 0 on error.
2565  *
2566  * We only need to fill in the address because the other members
2567  * of the RX descriptor are invariant, see tg3_init_rings.
2568  *
2569  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2570  * posting buffers we only dirty the first cache line of the RX
2571  * descriptor (containing the address).  Whereas for the RX status
2572  * buffers the cpu only reads the last cacheline of the RX descriptor
2573  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2574  */
2575 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2576                             int src_idx, u32 dest_idx_unmasked)
2577 {
2578         struct tg3_rx_buffer_desc *desc;
2579         struct ring_info *map, *src_map;
2580         struct sk_buff *skb;
2581         dma_addr_t mapping;
2582         int skb_size, dest_idx;
2583
2584         src_map = NULL;
2585         switch (opaque_key) {
2586         case RXD_OPAQUE_RING_STD:
2587                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2588                 desc = &tp->rx_std[dest_idx];
2589                 map = &tp->rx_std_buffers[dest_idx];
2590                 if (src_idx >= 0)
2591                         src_map = &tp->rx_std_buffers[src_idx];
2592                 skb_size = RX_PKT_BUF_SZ;
2593                 break;
2594
2595         case RXD_OPAQUE_RING_JUMBO:
2596                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2597                 desc = &tp->rx_jumbo[dest_idx];
2598                 map = &tp->rx_jumbo_buffers[dest_idx];
2599                 if (src_idx >= 0)
2600                         src_map = &tp->rx_jumbo_buffers[src_idx];
2601                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2602                 break;
2603
2604         default:
2605                 return -EINVAL;
2606         };
2607
2608         /* Do not overwrite any of the map or rp information
2609          * until we are sure we can commit to a new buffer.
2610          *
2611          * Callers depend upon this behavior and assume that
2612          * we leave everything unchanged if we fail.
2613          */
2614         skb = dev_alloc_skb(skb_size);
2615         if (skb == NULL)
2616                 return -ENOMEM;
2617
2618         skb->dev = tp->dev;
2619         skb_reserve(skb, tp->rx_offset);
2620
2621         mapping = pci_map_single(tp->pdev, skb->data,
2622                                  skb_size - tp->rx_offset,
2623                                  PCI_DMA_FROMDEVICE);
2624
2625         map->skb = skb;
2626         pci_unmap_addr_set(map, mapping, mapping);
2627
2628         if (src_map != NULL)
2629                 src_map->skb = NULL;
2630
2631         desc->addr_hi = ((u64)mapping >> 32);
2632         desc->addr_lo = ((u64)mapping & 0xffffffff);
2633
2634         return skb_size;
2635 }
2636
2637 /* We only need to move over in the address because the other
2638  * members of the RX descriptor are invariant.  See notes above
2639  * tg3_alloc_rx_skb for full details.
2640  */
2641 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2642                            int src_idx, u32 dest_idx_unmasked)
2643 {
2644         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2645         struct ring_info *src_map, *dest_map;
2646         int dest_idx;
2647
2648         switch (opaque_key) {
2649         case RXD_OPAQUE_RING_STD:
2650                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2651                 dest_desc = &tp->rx_std[dest_idx];
2652                 dest_map = &tp->rx_std_buffers[dest_idx];
2653                 src_desc = &tp->rx_std[src_idx];
2654                 src_map = &tp->rx_std_buffers[src_idx];
2655                 break;
2656
2657         case RXD_OPAQUE_RING_JUMBO:
2658                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2659                 dest_desc = &tp->rx_jumbo[dest_idx];
2660                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2661                 src_desc = &tp->rx_jumbo[src_idx];
2662                 src_map = &tp->rx_jumbo_buffers[src_idx];
2663                 break;
2664
2665         default:
2666                 return;
2667         };
2668
2669         dest_map->skb = src_map->skb;
2670         pci_unmap_addr_set(dest_map, mapping,
2671                            pci_unmap_addr(src_map, mapping));
2672         dest_desc->addr_hi = src_desc->addr_hi;
2673         dest_desc->addr_lo = src_desc->addr_lo;
2674
2675         src_map->skb = NULL;
2676 }
2677
2678 #if TG3_VLAN_TAG_USED
2679 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2680 {
2681         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2682 }
2683 #endif
2684
2685 /* The RX ring scheme is composed of multiple rings which post fresh
2686  * buffers to the chip, and one special ring the chip uses to report
2687  * status back to the host.
2688  *
2689  * The special ring reports the status of received packets to the
2690  * host.  The chip does not write into the original descriptor the
2691  * RX buffer was obtained from.  The chip simply takes the original
2692  * descriptor as provided by the host, updates the status and length
2693  * field, then writes this into the next status ring entry.
2694  *
2695  * Each ring the host uses to post buffers to the chip is described
2696  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2697  * it is first placed into the on-chip ram.  When the packet's length
2698  * is known, it walks down the TG3_BDINFO entries to select the ring.
2699  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2700  * which is within the range of the new packet's length is chosen.
2701  *
2702  * The "separate ring for rx status" scheme may sound queer, but it makes
2703  * sense from a cache coherency perspective.  If only the host writes
2704  * to the buffer post rings, and only the chip writes to the rx status
2705  * rings, then cache lines never move beyond shared-modified state.
2706  * If both the host and chip were to write into the same ring, cache line
2707  * eviction could occur since both entities want it in an exclusive state.
2708  */
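/* tg3_rx() is called with a packet budget from the driver's receive poll
 * path; it walks the status ring until it catches up with the hardware
 * producer index or exhausts the budget, then acknowledges its position and
 * refills whichever buffer rings it consumed from (tracked via work_mask).
 */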
2709 static int tg3_rx(struct tg3 *tp, int budget)
2710 {
2711         u32 work_mask;
2712         u32 sw_idx = tp->rx_rcb_ptr;
2713         u16 hw_idx;
2714         int received;
2715
2716         hw_idx = tp->hw_status->idx[0].rx_producer;
2717         /*
2718          * We need to order the read of hw_idx and the read of
2719          * the opaque cookie.
2720          */
2721         rmb();
2722         work_mask = 0;
2723         received = 0;
2724         while (sw_idx != hw_idx && budget > 0) {
2725                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2726                 unsigned int len;
2727                 struct sk_buff *skb;
2728                 dma_addr_t dma_addr;
2729                 u32 opaque_key, desc_idx, *post_ptr;
2730
2731                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2732                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2733                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2734                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2735                                                   mapping);
2736                         skb = tp->rx_std_buffers[desc_idx].skb;
2737                         post_ptr = &tp->rx_std_ptr;
2738                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2739                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2740                                                   mapping);
2741                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2742                         post_ptr = &tp->rx_jumbo_ptr;
2743                 }
2744                 else {
2745                         goto next_pkt_nopost;
2746                 }
2747
2748                 work_mask |= opaque_key;
2749
2750                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2751                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2752                 drop_it:
2753                         tg3_recycle_rx(tp, opaque_key,
2754                                        desc_idx, *post_ptr);
2755                 drop_it_no_recycle:
2756                         /* Other statistics kept track of by card. */
2757                         tp->net_stats.rx_dropped++;
2758                         goto next_pkt;
2759                 }
2760
2761                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2762
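                /* Copy-break: frames larger than RX_COPY_THRESHOLD keep their
                 * DMA buffer (a replacement skb is allocated for the ring),
                 * while smaller frames are copied into a fresh skb and the
                 * original ring buffer is recycled.
                 */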
2763                 if (len > RX_COPY_THRESHOLD &&
2764                     tp->rx_offset == 2
2765                     /* rx_offset != 2 iff this is a 5701 card running
2766                      * in PCI-X mode [see tg3_get_invariants()] */
2767                     ) {
2768                         int skb_size;
2769
2770                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2771                                                     desc_idx, *post_ptr);
2772                         if (skb_size < 0)
2773                                 goto drop_it;
2774
2775                         pci_unmap_single(tp->pdev, dma_addr,
2776                                          skb_size - tp->rx_offset,
2777                                          PCI_DMA_FROMDEVICE);
2778
2779                         skb_put(skb, len);
2780                 } else {
2781                         struct sk_buff *copy_skb;
2782
2783                         tg3_recycle_rx(tp, opaque_key,
2784                                        desc_idx, *post_ptr);
2785
2786                         copy_skb = dev_alloc_skb(len + 2);
2787                         if (copy_skb == NULL)
2788                                 goto drop_it_no_recycle;
2789
2790                         copy_skb->dev = tp->dev;
2791                         skb_reserve(copy_skb, 2);
2792                         skb_put(copy_skb, len);
2793                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2794                         memcpy(copy_skb->data, skb->data, len);
2795                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2796
2797                         /* We'll reuse the original ring buffer. */
2798                         skb = copy_skb;
2799                 }
2800
2801                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2802                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2803                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2804                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2805                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2806                 else
2807                         skb->ip_summed = CHECKSUM_NONE;
2808
2809                 skb->protocol = eth_type_trans(skb, tp->dev);
2810 #if TG3_VLAN_TAG_USED
2811                 if (tp->vlgrp != NULL &&
2812                     desc->type_flags & RXD_FLAG_VLAN) {
2813                         tg3_vlan_rx(tp, skb,
2814                                     desc->err_vlan & RXD_VLAN_MASK);
2815                 } else
2816 #endif
2817                         netif_receive_skb(skb);
2818
2819                 tp->dev->last_rx = jiffies;
2820                 received++;
2821                 budget--;
2822
2823 next_pkt:
2824                 (*post_ptr)++;
2825 next_pkt_nopost:
2826                 sw_idx++;
2827                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2828
2829                 /* Refresh hw_idx to see if there is new work */
2830                 if (sw_idx == hw_idx) {
2831                         hw_idx = tp->hw_status->idx[0].rx_producer;
2832                         rmb();
2833                 }
2834         }
2835
2836         /* ACK the status ring. */
2837         tp->rx_rcb_ptr = sw_idx;
2838         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2839
2840         /* Refill RX ring(s). */
2841         if (work_mask & RXD_OPAQUE_RING_STD) {
2842                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2843                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2844                              sw_idx);
2845         }
2846         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2847                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2848                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2849                              sw_idx);
2850         }
2851         mmiowb();
2852
2853         return received;
2854 }
2855
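/* NAPI ->poll() callback.  The return value tells the core whether to keep
 * polling: 0 once all pending work fit within the budget (after
 * __netif_rx_complete() and re-enabling chip interrupts via
 * tg3_restart_ints() below), 1 to stay on the poll list.
 */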
2856 static int tg3_poll(struct net_device *netdev, int *budget)
2857 {
2858         struct tg3 *tp = netdev_priv(netdev);
2859         struct tg3_hw_status *sblk = tp->hw_status;
2860         unsigned long flags;
2861         int done;
2862
2863         spin_lock_irqsave(&tp->lock, flags);
2864
2865         /* handle link change and other phy events */
2866         if (!(tp->tg3_flags &
2867               (TG3_FLAG_USE_LINKCHG_REG |
2868                TG3_FLAG_POLL_SERDES))) {
2869                 if (sblk->status & SD_STATUS_LINK_CHG) {
2870                         sblk->status = SD_STATUS_UPDATED |
2871                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2872                         tg3_setup_phy(tp, 0);
2873                 }
2874         }
2875
2876         /* run TX completion thread */
2877         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2878                 spin_lock(&tp->tx_lock);
2879                 tg3_tx(tp);
2880                 spin_unlock(&tp->tx_lock);
2881         }
2882
2883         spin_unlock_irqrestore(&tp->lock, flags);
2884
2885         /* run RX thread, within the bounds set by NAPI.
2886          * All RX "locking" is done by ensuring outside
2887          * code synchronizes with dev->poll()
2888          */
2889         done = 1;
2890         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2891                 int orig_budget = *budget;
2892                 int work_done;
2893
2894                 if (orig_budget > netdev->quota)
2895                         orig_budget = netdev->quota;
2896
2897                 work_done = tg3_rx(tp, orig_budget);
2898
2899                 *budget -= work_done;
2900                 netdev->quota -= work_done;
2901
2902                 if (work_done >= orig_budget)
2903                         done = 0;
2904         }
2905
2906         /* if no more work, tell net stack and NIC we're done */
2907         if (done) {
2908                 spin_lock_irqsave(&tp->lock, flags);
2909                 __netif_rx_complete(netdev);
2910                 tg3_restart_ints(tp);
2911                 spin_unlock_irqrestore(&tp->lock, flags);
2912         }
2913
2914         return (done ? 0 : 1);
2915 }
2916
2917 /* MSI ISR - No need to check for interrupt sharing and no need to
2918  * flush status block and interrupt mailbox. PCI ordering rules
2919  * guarantee that MSI will arrive after the status block.
2920  */
2921 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2922 {
2923         struct net_device *dev = dev_id;
2924         struct tg3 *tp = netdev_priv(dev);
2925         struct tg3_hw_status *sblk = tp->hw_status;
2926         unsigned long flags;
2927
2928         spin_lock_irqsave(&tp->lock, flags);
2929
2930         /*
2931          * writing any value to intr-mbox-0 clears PCI INTA# and
2932          * chip-internal interrupt pending events.
2933          * writing non-zero to intr-mbox-0 additionally tells the
2934          * NIC to stop sending us irqs, engaging "in-intr-handler"
2935          * event coalescing.
2936          */
2937         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2938         sblk->status &= ~SD_STATUS_UPDATED;
2939
2940         if (likely(tg3_has_work(tp)))
2941                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2942         else {
2943                 /* no work, re-enable interrupts
2944                  */
2945                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2946                              0x00000000);
2947         }
2948
2949         spin_unlock_irqrestore(&tp->lock, flags);
2950
2951         return IRQ_RETVAL(1);
2952 }
2953
2954 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2955 {
2956         struct net_device *dev = dev_id;
2957         struct tg3 *tp = netdev_priv(dev);
2958         struct tg3_hw_status *sblk = tp->hw_status;
2959         unsigned long flags;
2960         unsigned int handled = 1;
2961
2962         spin_lock_irqsave(&tp->lock, flags);
2963
2964         /* In INTx mode, it is possible for the interrupt to arrive at
2965          * the CPU before the status block that was posted prior to it.
2966          * Reading the PCI State register will confirm whether the
2967          * interrupt is ours and will flush the status block.
2968          */
2969         if ((sblk->status & SD_STATUS_UPDATED) ||
2970             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2971                 /*
2972                  * writing any value to intr-mbox-0 clears PCI INTA# and
2973                  * chip-internal interrupt pending events.
2974                  * writing non-zero to intr-mbox-0 additionally tells the
2975                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2976                  * event coalescing.
2977                  */
2978                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2979                              0x00000001);
2980                 /*
2981                  * Flush PCI write.  This also guarantees that our
2982                  * status block has been flushed to host memory.
2983                  */
2984                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2985                 sblk->status &= ~SD_STATUS_UPDATED;
2986
2987                 if (likely(tg3_has_work(tp)))
2988                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2989                 else {
2990                         /* no work, shared interrupt perhaps?  re-enable
2991                          * interrupts, and flush that PCI write
2992                          */
2993                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2994                                 0x00000000);
2995                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2996                 }
2997         } else {        /* shared interrupt */
2998                 handled = 0;
2999         }
3000
3001         spin_unlock_irqrestore(&tp->lock, flags);
3002
3003         return IRQ_RETVAL(handled);
3004 }
3005
3006 /* ISR for interrupt test */
3007 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3008                 struct pt_regs *regs)
3009 {
3010         struct net_device *dev = dev_id;
3011         struct tg3 *tp = netdev_priv(dev);
3012         struct tg3_hw_status *sblk = tp->hw_status;
3013
3014         if (sblk->status & SD_STATUS_UPDATED) {
3015                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3016                              0x00000001);
3017                 return IRQ_RETVAL(1);
3018         }
3019         return IRQ_RETVAL(0);
3020 }
3021
3022 static int tg3_init_hw(struct tg3 *);
3023 static int tg3_halt(struct tg3 *, int);
3024
3025 #ifdef CONFIG_NET_POLL_CONTROLLER
3026 static void tg3_poll_controller(struct net_device *dev)
3027 {
3028         struct tg3 *tp = netdev_priv(dev);
3029
3030         tg3_interrupt(tp->pdev->irq, dev, NULL);
3031 }
3032 #endif
3033
3034 static void tg3_reset_task(void *_data)
3035 {
3036         struct tg3 *tp = _data;
3037         unsigned int restart_timer;
3038
3039         tg3_netif_stop(tp);
3040
3041         spin_lock_irq(&tp->lock);
3042         spin_lock(&tp->tx_lock);
3043
3044         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3045         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3046
3047         tg3_halt(tp, 0);
3048         tg3_init_hw(tp);
3049
3050         tg3_netif_start(tp);
3051
3052         spin_unlock(&tp->tx_lock);
3053         spin_unlock_irq(&tp->lock);
3054
3055         if (restart_timer)
3056                 mod_timer(&tp->timer, jiffies + 1);
3057 }
3058
3059 static void tg3_tx_timeout(struct net_device *dev)
3060 {
3061         struct tg3 *tp = netdev_priv(dev);
3062
3063         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3064                dev->name);
3065
3066         schedule_work(&tp->reset_task);
3067 }
3068
3069 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3070
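/* Work around the 4GB-boundary DMA erratum: replace the offending skb with
 * a freshly allocated linear copy, point a single tx descriptor at the new
 * mapping, then unmap and clear the sw ring entries that described the
 * original skb and its fragments before freeing it.
 */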
3071 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3072                                        u32 guilty_entry, int guilty_len,
3073                                        u32 last_plus_one, u32 *start, u32 mss)
3074 {
3075         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3076         dma_addr_t new_addr;
3077         u32 entry = *start;
3078         int i;
3079
3080         if (!new_skb) {
3081                 dev_kfree_skb(skb);
3082                 return -1;
3083         }
3084
3085         /* New SKB is guaranteed to be linear. */
3086         entry = *start;
3087         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3088                                   PCI_DMA_TODEVICE);
3089         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3090                     (skb->ip_summed == CHECKSUM_HW) ?
3091                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3092         *start = NEXT_TX(entry);
3093
3094         /* Now clean up the sw ring entries. */
3095         i = 0;
3096         while (entry != last_plus_one) {
3097                 int len;
3098
3099                 if (i == 0)
3100                         len = skb_headlen(skb);
3101                 else
3102                         len = skb_shinfo(skb)->frags[i-1].size;
3103                 pci_unmap_single(tp->pdev,
3104                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3105                                  len, PCI_DMA_TODEVICE);
3106                 if (i == 0) {
3107                         tp->tx_buffers[entry].skb = new_skb;
3108                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3109                 } else {
3110                         tp->tx_buffers[entry].skb = NULL;
3111                 }
3112                 entry = NEXT_TX(entry);
3113                 i++;
3114         }
3115
3116         dev_kfree_skb(skb);
3117
3118         return 0;
3119 }
3120
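/* Fill in one tx descriptor.  Callers pack the END marker and the TSO MSS
 * into mss_and_is_end as (is_last_frag | (mss << 1)); see the (i == last)
 * users in tg3_start_xmit() below.
 */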
3121 static void tg3_set_txd(struct tg3 *tp, int entry,
3122                         dma_addr_t mapping, int len, u32 flags,
3123                         u32 mss_and_is_end)
3124 {
3125         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3126         int is_end = (mss_and_is_end & 0x1);
3127         u32 mss = (mss_and_is_end >> 1);
3128         u32 vlan_tag = 0;
3129
3130         if (is_end)
3131                 flags |= TXD_FLAG_END;
3132         if (flags & TXD_FLAG_VLAN) {
3133                 vlan_tag = flags >> 16;
3134                 flags &= 0xffff;
3135         }
3136         vlan_tag |= (mss << TXD_MSS_SHIFT);
3137
3138         txd->addr_hi = ((u64) mapping >> 32);
3139         txd->addr_lo = ((u64) mapping & 0xffffffff);
3140         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3141         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3142 }
3143
3144 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3145 {
3146         u32 base = (u32) mapping & 0xffffffff;
3147
3148         return ((base > 0xffffdcc0) &&
3149                 (base + len + 8 < base));
3150 }
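
/* Worked example (hypothetical values): for mapping 0xffffe000 and len
 * 0x2000, base > 0xffffdcc0 and base + len + 8 wraps to 0x8 in 32-bit
 * arithmetic, so the test fires and tg3_start_xmit() routes the packet
 * through tigon3_4gb_hwbug_workaround().
 */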
3151
3152 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3153 {
3154         struct tg3 *tp = netdev_priv(dev);
3155         dma_addr_t mapping;
3156         unsigned int i;
3157         u32 len, entry, base_flags, mss;
3158         int would_hit_hwbug;
3159         unsigned long flags;
3160
3161         len = skb_headlen(skb);
3162
3163         /* No BH disabling for tx_lock here.  We are running in BH disabled
3164          * context and TX reclaim runs via tp->poll inside of a software
3165          * interrupt.  Rejoice!
3166          *
3167          * Actually, things are not so simple.  If we are to take a hw
3168          * IRQ here, we can deadlock, consider:
3169          *
3170          *       CPU1           CPU2
3171          *   tg3_start_xmit
3172          *   take tp->tx_lock
3173          *                      tg3_timer
3174          *                      take tp->lock
3175          *   tg3_interrupt
3176          *   spin on tp->lock
3177          *                      spin on tp->tx_lock
3178          *
3179          * So we really do need to disable interrupts when taking
3180          * tx_lock here.
3181          */
3182         local_irq_save(flags);
3183         if (!spin_trylock(&tp->tx_lock)) {
3184                 local_irq_restore(flags);
3185                 return NETDEV_TX_LOCKED;
3186         }
3187
3188         /* This is a hard error, log it. */
3189         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3190                 netif_stop_queue(dev);
3191                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3192                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3193                        dev->name);
3194                 return NETDEV_TX_BUSY;
3195         }
3196
3197         entry = tp->tx_prod;
3198         base_flags = 0;
3199         if (skb->ip_summed == CHECKSUM_HW)
3200                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3201 #if TG3_TSO_SUPPORT != 0
3202         mss = 0;
3203         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3204             (mss = skb_shinfo(skb)->tso_size) != 0) {
3205                 int tcp_opt_len, ip_tcp_len;
3206
3207                 if (skb_header_cloned(skb) &&
3208                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3209                         dev_kfree_skb(skb);
3210                         goto out_unlock;
3211                 }
3212
3213                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3214                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3215
3216                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3217                                TXD_FLAG_CPU_POST_DMA);
3218
3219                 skb->nh.iph->check = 0;
3220                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3221                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3222                         skb->h.th->check = 0;
3223                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3224                 }
3225                 else {
3226                         skb->h.th->check =
3227                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3228                                                    skb->nh.iph->daddr,
3229                                                    0, IPPROTO_TCP, 0);
3230                 }
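                /* The non-HW_TSO path above seeds th->check with the
                 * complemented pseudo-header checksum (length left as 0) so
                 * the checksum engine can fold in each segment's TCP header
                 * and payload on its own.
                 */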
3231
3232                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3233                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3234                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3235                                 int tsflags;
3236
3237                                 tsflags = ((skb->nh.iph->ihl - 5) +
3238                                            (tcp_opt_len >> 2));
3239                                 mss |= (tsflags << 11);
3240                         }
3241                 } else {
3242                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3243                                 int tsflags;
3244
3245                                 tsflags = ((skb->nh.iph->ihl - 5) +
3246                                            (tcp_opt_len >> 2));
3247                                 base_flags |= tsflags << 12;
3248                         }
3249                 }
3250         }
3251 #else
3252         mss = 0;
3253 #endif
3254 #if TG3_VLAN_TAG_USED
3255         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3256                 base_flags |= (TXD_FLAG_VLAN |
3257                                (vlan_tx_tag_get(skb) << 16));
3258 #endif
3259
3260         /* Queue skb data, a.k.a. the main skb fragment. */
3261         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3262
3263         tp->tx_buffers[entry].skb = skb;
3264         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3265
3266         would_hit_hwbug = 0;
3267
3268         if (tg3_4g_overflow_test(mapping, len))
3269                 would_hit_hwbug = entry + 1;
3270
3271         tg3_set_txd(tp, entry, mapping, len, base_flags,
3272                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3273
3274         entry = NEXT_TX(entry);
3275
3276         /* Now loop through additional data fragments, and queue them. */
3277         if (skb_shinfo(skb)->nr_frags > 0) {
3278                 unsigned int i, last;
3279
3280                 last = skb_shinfo(skb)->nr_frags - 1;
3281                 for (i = 0; i <= last; i++) {
3282                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3283
3284                         len = frag->size;
3285                         mapping = pci_map_page(tp->pdev,
3286                                                frag->page,
3287                                                frag->page_offset,
3288                                                len, PCI_DMA_TODEVICE);
3289
3290                         tp->tx_buffers[entry].skb = NULL;
3291                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3292
3293                         if (tg3_4g_overflow_test(mapping, len)) {
3294                                 /* Only one should match. */
3295                                 if (would_hit_hwbug)
3296                                         BUG();
3297                                 would_hit_hwbug = entry + 1;
3298                         }
3299
3300                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3301                                 tg3_set_txd(tp, entry, mapping, len,
3302                                             base_flags, (i == last)|(mss << 1));
3303                         else
3304                                 tg3_set_txd(tp, entry, mapping, len,
3305                                             base_flags, (i == last));
3306
3307                         entry = NEXT_TX(entry);
3308                 }
3309         }
3310
3311         if (would_hit_hwbug) {
3312                 u32 last_plus_one = entry;
3313                 u32 start;
3314                 unsigned int len = 0;
3315
3316                 would_hit_hwbug -= 1;
3317                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3318                 entry &= (TG3_TX_RING_SIZE - 1);
3319                 start = entry;
3320                 i = 0;
3321                 while (entry != last_plus_one) {
3322                         if (i == 0)
3323                                 len = skb_headlen(skb);
3324                         else
3325                                 len = skb_shinfo(skb)->frags[i-1].size;
3326
3327                         if (entry == would_hit_hwbug)
3328                                 break;
3329
3330                         i++;
3331                         entry = NEXT_TX(entry);
3332
3333                 }
3334
3335                 /* If the workaround fails due to memory/mapping
3336                  * failure, silently drop this packet.
3337                  */
3338                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3339                                                 entry, len,
3340                                                 last_plus_one,
3341                                                 &start, mss))
3342                         goto out_unlock;
3343
3344                 entry = start;
3345         }
3346
3347         /* Packets are ready, update Tx producer idx local and on card. */
3348         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3349
3350         tp->tx_prod = entry;
3351         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3352                 netif_stop_queue(dev);
3353
3354 out_unlock:
3355         mmiowb();
3356         spin_unlock_irqrestore(&tp->tx_lock, flags);
3357
3358         dev->trans_start = jiffies;
3359
3360         return NETDEV_TX_OK;
3361 }
3362
3363 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3364                                int new_mtu)
3365 {
3366         dev->mtu = new_mtu;
3367
3368         if (new_mtu > ETH_DATA_LEN)
3369                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3370         else
3371                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3372 }
3373
3374 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3375 {
3376         struct tg3 *tp = netdev_priv(dev);
3377
3378         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3379                 return -EINVAL;
3380
3381         if (!netif_running(dev)) {
3382                 /* We'll just catch it later when the
3383                  * device is brought up.
3384                  */
3385                 tg3_set_mtu(dev, tp, new_mtu);
3386                 return 0;
3387         }
3388
3389         tg3_netif_stop(tp);
3390         spin_lock_irq(&tp->lock);
3391         spin_lock(&tp->tx_lock);
3392
3393         tg3_halt(tp, 1);
3394
3395         tg3_set_mtu(dev, tp, new_mtu);
3396
3397         tg3_init_hw(tp);
3398
3399         tg3_netif_start(tp);
3400
3401         spin_unlock(&tp->tx_lock);
3402         spin_unlock_irq(&tp->lock);
3403
3404         return 0;
3405 }
3406
3407 /* Free up pending packets in all rx/tx rings.
3408  *
3409  * The chip has been shut down and the driver detached from
3410  * the networking, so no interrupts or new tx packets will
3411  * end up in the driver.  tp->{tx,}lock is not held and we are not
3412  * the network stack, so no interrupts or new tx packets will
3413  */
3414 static void tg3_free_rings(struct tg3 *tp)
3415 {
3416         struct ring_info *rxp;
3417         int i;
3418
3419         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3420                 rxp = &tp->rx_std_buffers[i];
3421
3422                 if (rxp->skb == NULL)
3423                         continue;
3424                 pci_unmap_single(tp->pdev,
3425                                  pci_unmap_addr(rxp, mapping),
3426                                  RX_PKT_BUF_SZ - tp->rx_offset,
3427                                  PCI_DMA_FROMDEVICE);
3428                 dev_kfree_skb_any(rxp->skb);
3429                 rxp->skb = NULL;
3430         }
3431
3432         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3433                 rxp = &tp->rx_jumbo_buffers[i];
3434
3435                 if (rxp->skb == NULL)
3436                         continue;
3437                 pci_unmap_single(tp->pdev,
3438                                  pci_unmap_addr(rxp, mapping),
3439                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3440                                  PCI_DMA_FROMDEVICE);
3441                 dev_kfree_skb_any(rxp->skb);
3442                 rxp->skb = NULL;
3443         }
3444
3445         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3446                 struct tx_ring_info *txp;
3447                 struct sk_buff *skb;
3448                 int j;
3449
3450                 txp = &tp->tx_buffers[i];
3451                 skb = txp->skb;
3452
3453                 if (skb == NULL) {
3454                         i++;
3455                         continue;
3456                 }
3457
3458                 pci_unmap_single(tp->pdev,
3459                                  pci_unmap_addr(txp, mapping),
3460                                  skb_headlen(skb),
3461                                  PCI_DMA_TODEVICE);
3462                 txp->skb = NULL;
3463
3464                 i++;
3465
3466                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3467                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3468                         pci_unmap_page(tp->pdev,
3469                                        pci_unmap_addr(txp, mapping),
3470                                        skb_shinfo(skb)->frags[j].size,
3471                                        PCI_DMA_TODEVICE);
3472                         i++;
3473                 }
3474
3475                 dev_kfree_skb_any(skb);
3476         }
3477 }
3478
3479 /* Initialize tx/rx rings for packet processing.
3480  *
3481  * The chip has been shut down and the driver detached from
3482  * the network stack, so no interrupts or new tx packets will
3483  * end up in the driver.  tp->{tx,}lock are held and thus
3484  * we may not sleep.
3485  */
3486 static void tg3_init_rings(struct tg3 *tp)
3487 {
3488         u32 i;
3489
3490         /* Free up all the SKBs. */
3491         tg3_free_rings(tp);
3492
3493         /* Zero out all descriptors. */
3494         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3495         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3496         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3497         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3498
3499         /* Initialize invariants of the rings, we only set this
3500          * stuff once.  This works because the card does not
3501          * write into the rx buffer posting rings.
3502          */
3503         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3504                 struct tg3_rx_buffer_desc *rxd;
3505
3506                 rxd = &tp->rx_std[i];
3507                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3508                         << RXD_LEN_SHIFT;
3509                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3510                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3511                                (i << RXD_OPAQUE_INDEX_SHIFT));
3512         }
3513
3514         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3515                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3516                         struct tg3_rx_buffer_desc *rxd;
3517
3518                         rxd = &tp->rx_jumbo[i];
3519                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3520                                 << RXD_LEN_SHIFT;
3521                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3522                                 RXD_FLAG_JUMBO;
3523                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3524                                (i << RXD_OPAQUE_INDEX_SHIFT));
3525                 }
3526         }
3527
3528         /* Now allocate fresh SKBs for each rx ring. */
3529         for (i = 0; i < tp->rx_pending; i++) {
3530                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3531                                      -1, i) < 0)
3532                         break;
3533         }
3534
3535         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3536                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3537                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3538                                              -1, i) < 0)
3539                                 break;
3540                 }
3541         }
3542 }
3543
3544 /*
3545  * Must not be invoked with interrupt sources disabled and
3546  * the hardware shut down.
3547  */
3548 static void tg3_free_consistent(struct tg3 *tp)
3549 {
3550         if (tp->rx_std_buffers) {
3551                 kfree(tp->rx_std_buffers);
3552                 tp->rx_std_buffers = NULL;
3553         }
3554         if (tp->rx_std) {
3555                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3556                                     tp->rx_std, tp->rx_std_mapping);
3557                 tp->rx_std = NULL;
3558         }
3559         if (tp->rx_jumbo) {
3560                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3561                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3562                 tp->rx_jumbo = NULL;
3563         }
3564         if (tp->rx_rcb) {
3565                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3566                                     tp->rx_rcb, tp->rx_rcb_mapping);
3567                 tp->rx_rcb = NULL;
3568         }
3569         if (tp->tx_ring) {
3570                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3571                         tp->tx_ring, tp->tx_desc_mapping);
3572                 tp->tx_ring = NULL;
3573         }
3574         if (tp->hw_status) {
3575                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3576                                     tp->hw_status, tp->status_mapping);
3577                 tp->hw_status = NULL;
3578         }
3579         if (tp->hw_stats) {
3580                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3581                                     tp->hw_stats, tp->stats_mapping);
3582                 tp->hw_stats = NULL;
3583         }
3584 }
3585
3586 /*
3587  * Must not be invoked with interrupt sources disabled and
3588  * the hardware shut down.  Can sleep.
3589  */
3590 static int tg3_alloc_consistent(struct tg3 *tp)
3591 {
3592         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3593                                       (TG3_RX_RING_SIZE +
3594                                        TG3_RX_JUMBO_RING_SIZE)) +
3595                                      (sizeof(struct tx_ring_info) *
3596                                       TG3_TX_RING_SIZE),
3597                                      GFP_KERNEL);
3598         if (!tp->rx_std_buffers)
3599                 return -ENOMEM;
3600
3601         memset(tp->rx_std_buffers, 0,
3602                (sizeof(struct ring_info) *
3603                 (TG3_RX_RING_SIZE +
3604                  TG3_RX_JUMBO_RING_SIZE)) +
3605                (sizeof(struct tx_ring_info) *
3606                 TG3_TX_RING_SIZE));
3607
3608         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3609         tp->tx_buffers = (struct tx_ring_info *)
3610                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
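        /* Layout of the single allocation above:
         *   rx_std_buffers[TG3_RX_RING_SIZE]          struct ring_info
         *   rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]  struct ring_info
         *   tx_buffers[TG3_TX_RING_SIZE]              struct tx_ring_info
         */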
3611
3612         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3613                                           &tp->rx_std_mapping);
3614         if (!tp->rx_std)
3615                 goto err_out;
3616
3617         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3618                                             &tp->rx_jumbo_mapping);
3619
3620         if (!tp->rx_jumbo)
3621                 goto err_out;
3622
3623         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3624                                           &tp->rx_rcb_mapping);
3625         if (!tp->rx_rcb)
3626                 goto err_out;
3627
3628         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3629                                            &tp->tx_desc_mapping);
3630         if (!tp->tx_ring)
3631                 goto err_out;
3632
3633         tp->hw_status = pci_alloc_consistent(tp->pdev,
3634                                              TG3_HW_STATUS_SIZE,
3635                                              &tp->status_mapping);
3636         if (!tp->hw_status)
3637                 goto err_out;
3638
3639         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3640                                             sizeof(struct tg3_hw_stats),
3641                                             &tp->stats_mapping);
3642         if (!tp->hw_stats)
3643                 goto err_out;
3644
3645         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3646         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3647
3648         return 0;
3649
3650 err_out:
3651         tg3_free_consistent(tp);
3652         return -ENOMEM;
3653 }
3654
3655 #define MAX_WAIT_CNT 1000
3656
3657 /* To stop a block, clear the enable bit and poll till it
3658  * clears.  tp->lock is held.
3659  */
3660 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3661 {
3662         unsigned int i;
3663         u32 val;
3664
3665         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3666                 switch (ofs) {
3667                 case RCVLSC_MODE:
3668                 case DMAC_MODE:
3669                 case MBFREE_MODE:
3670                 case BUFMGR_MODE:
3671                 case MEMARB_MODE:
3672                         /* We can't enable/disable these bits of the
3673                          * 5705/5750, just say success.
3674                          */
3675                         return 0;
3676
3677                 default:
3678                         break;
3679                 }
3680         }
3681
3682         val = tr32(ofs);
3683         val &= ~enable_bit;
3684         tw32_f(ofs, val);
3685
3686         for (i = 0; i < MAX_WAIT_CNT; i++) {
3687                 udelay(100);
3688                 val = tr32(ofs);
3689                 if ((val & enable_bit) == 0)
3690                         break;
3691         }
3692
3693         if (i == MAX_WAIT_CNT && !silent) {
3694                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3695                        "ofs=%lx enable_bit=%x\n",
3696                        ofs, enable_bit);
3697                 return -ENODEV;
3698         }
3699
3700         return 0;
3701 }
3702
3703 /* tp->lock is held. */
3704 static int tg3_abort_hw(struct tg3 *tp, int silent)
3705 {
3706         int i, err;
3707
3708         tg3_disable_ints(tp);
3709
3710         tp->rx_mode &= ~RX_MODE_ENABLE;
3711         tw32_f(MAC_RX_MODE, tp->rx_mode);
3712         udelay(10);
3713
3714         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
3715         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
3716         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
3717         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
3718         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
3719         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
3720
3721         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
3722         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
3723         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
3724         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
3725         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
3726         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
3727         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
3728
3729         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3730         tw32_f(MAC_MODE, tp->mac_mode);
3731         udelay(40);
3732
3733         tp->tx_mode &= ~TX_MODE_ENABLE;
3734         tw32_f(MAC_TX_MODE, tp->tx_mode);
3735
3736         for (i = 0; i < MAX_WAIT_CNT; i++) {
3737                 udelay(100);
3738                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3739                         break;
3740         }
3741         if (i >= MAX_WAIT_CNT) {
3742                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3743                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3744                        tp->dev->name, tr32(MAC_TX_MODE));
3745                 err |= -ENODEV;
3746         }
3747
3748         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
3749         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
3750         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
3751
3752         tw32(FTQ_RESET, 0xffffffff);
3753         tw32(FTQ_RESET, 0x00000000);
3754
3755         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
3756         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
3757
3758         if (tp->hw_status)
3759                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3760         if (tp->hw_stats)
3761                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3762
3763         return err;
3764 }
3765
3766 /* tp->lock is held. */
3767 static int tg3_nvram_lock(struct tg3 *tp)
3768 {
3769         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3770                 int i;
3771
3772                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3773                 for (i = 0; i < 8000; i++) {
3774                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3775                                 break;
3776                         udelay(20);
3777                 }
3778                 if (i == 8000)
3779                         return -ENODEV;
3780         }
3781         return 0;
3782 }
3783
3784 /* tp->lock is held. */
3785 static void tg3_nvram_unlock(struct tg3 *tp)
3786 {
3787         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3788                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3789 }
3790
3791 /* tp->lock is held. */
3792 static void tg3_enable_nvram_access(struct tg3 *tp)
3793 {
3794         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3795             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3796                 u32 nvaccess = tr32(NVRAM_ACCESS);
3797
3798                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3799         }
3800 }
3801
3802 /* tp->lock is held. */
3803 static void tg3_disable_nvram_access(struct tg3 *tp)
3804 {
3805         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3806             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3807                 u32 nvaccess = tr32(NVRAM_ACCESS);
3808
3809                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3810         }
3811 }
3812
3813 /* tp->lock is held. */
3814 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3815 {
3816         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3817                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3818                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3819
3820         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3821                 switch (kind) {
3822                 case RESET_KIND_INIT:
3823                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3824                                       DRV_STATE_START);
3825                         break;
3826
3827                 case RESET_KIND_SHUTDOWN:
3828                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3829                                       DRV_STATE_UNLOAD);
3830                         break;
3831
3832                 case RESET_KIND_SUSPEND:
3833                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3834                                       DRV_STATE_SUSPEND);
3835                         break;
3836
3837                 default:
3838                         break;
3839                 }
3840         }
3841 }
3842
3843 /* tp->lock is held. */
3844 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3845 {
3846         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3847                 switch (kind) {
3848                 case RESET_KIND_INIT:
3849                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3850                                       DRV_STATE_START_DONE);
3851                         break;
3852
3853                 case RESET_KIND_SHUTDOWN:
3854                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3855                                       DRV_STATE_UNLOAD_DONE);
3856                         break;
3857
3858                 default:
3859                         break;
3860                 }
3861         }
3862 }
3863
3864 /* tp->lock is held. */
3865 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3866 {
3867         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3868                 switch (kind) {
3869                 case RESET_KIND_INIT:
3870                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3871                                       DRV_STATE_START);
3872                         break;
3873
3874                 case RESET_KIND_SHUTDOWN:
3875                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3876                                       DRV_STATE_UNLOAD);
3877                         break;
3878
3879                 case RESET_KIND_SUSPEND:
3880                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3881                                       DRV_STATE_SUSPEND);
3882                         break;
3883
3884                 default:
3885                         break;
3886                 }
3887         }
3888 }
3889
3890 static void tg3_stop_fw(struct tg3 *);
3891
3892 /* tp->lock is held. */
3893 static int tg3_chip_reset(struct tg3 *tp)
3894 {
3895         u32 val;
3896         u32 flags_save;
3897         int i;
3898
3899         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3900                 tg3_nvram_lock(tp);
3901
3902         /*
3903          * We must avoid the readl() that normally takes place.
3904          * It locks up machines, causes machine checks, and triggers
3905          * other fun things.  So, temporarily disable the 5701
3906          * hardware workaround while we do the reset.
3907          */
3908         flags_save = tp->tg3_flags;
3909         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3910
3911         /* do the reset */
3912         val = GRC_MISC_CFG_CORECLK_RESET;
3913
3914         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3915                 if (tr32(0x7e2c) == 0x60) {
3916                         tw32(0x7e2c, 0x20);
3917                 }
3918                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3919                         tw32(GRC_MISC_CFG, (1 << 29));
3920                         val |= (1 << 29);
3921                 }
3922         }
3923
3924         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3925                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3926         tw32(GRC_MISC_CFG, val);
3927
3928         /* restore 5701 hardware bug workaround flag */
3929         tp->tg3_flags = flags_save;
3930
3931         /* Unfortunately, we have to delay before the PCI read back.
3932          * Some 575X chips will not even respond to a PCI cfg access
3933          * when the reset command is given to the chip.
3934          *
3935          * How do these hardware designers expect things to work
3936          * properly if the PCI write is posted for a long period
3937          * of time?  It is always necessary to have some method by
3938          * which a register read back can occur to push out the
3939          * write that performs the reset.
3940          *
3941          * For most tg3 variants the trick below was working.
3942          * Ho hum...
3943          */
3944         udelay(120);
3945
3946         /* Flush PCI posted writes.  The normal MMIO registers
3947          * are inaccessible at this time so this is the only
3948          * way to do this reliably (actually, this is no longer
3949          * the case, see above).  I tried to use indirect
3950          * register read/write but this upset some 5701 variants.
3951          */
3952         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3953
3954         udelay(120);
3955
3956         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3957                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3958                         int i;
3959                         u32 cfg_val;
3960
3961                         /* Wait for link training to complete.  */
3962                         for (i = 0; i < 5000; i++)
3963                                 udelay(100);
3964
3965                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3966                         pci_write_config_dword(tp->pdev, 0xc4,
3967                                                cfg_val | (1 << 15));
3968                 }
3969                 /* Set PCIE max payload size and clear error status.  */
3970                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3971         }
3972
3973         /* Re-enable indirect register accesses. */
3974         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3975                                tp->misc_host_ctrl);
3976
3977         /* Set MAX PCI retry to zero. */
3978         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3979         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3980             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3981                 val |= PCISTATE_RETRY_SAME_DMA;
3982         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3983
3984         pci_restore_state(tp->pdev);
3985
3986         /* Make sure PCI-X relaxed ordering bit is clear. */
3987         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3988         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3989         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3990
3991         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3992
3993         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3994                 tg3_stop_fw(tp);
3995                 tw32(0x5000, 0x400);
3996         }
3997
3998         tw32(GRC_MODE, tp->grc_mode);
3999
4000         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4001                 u32 val = tr32(0xc4);
4002
4003                 tw32(0xc4, val | (1 << 15));
4004         }
4005
4006         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4007             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4008                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4009                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4010                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4011                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4012         }
4013
4014         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4015                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4016                 tw32_f(MAC_MODE, tp->mac_mode);
4017         } else
4018                 tw32_f(MAC_MODE, 0);
4019         udelay(40);
4020
4021         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4022                 /* Wait for firmware initialization to complete. */
4023                 for (i = 0; i < 100000; i++) {
4024                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4025                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4026                                 break;
4027                         udelay(10);
4028                 }
4029                 if (i >= 100000) {
4030                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4031                                "firmware will not restart magic=%08x\n",
4032                                tp->dev->name, val);
4033                         return -ENODEV;
4034                 }
4035         }
4036
4037         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4038             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4039                 u32 val = tr32(0x7c00);
4040
4041                 tw32(0x7c00, val | (1 << 25));
4042         }
4043
4044         /* Reprobe ASF enable state.  */
4045         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4046         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4047         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4048         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4049                 u32 nic_cfg;
4050
4051                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4052                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4053                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4054                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4055                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4056                 }
4057         }
4058
4059         return 0;
4060 }
4061
4062 /* tp->lock is held. */
4063 static void tg3_stop_fw(struct tg3 *tp)
4064 {
4065         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4066                 u32 val;
4067                 int i;
4068
4069                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4070                 val = tr32(GRC_RX_CPU_EVENT);
4071                 val |= (1 << 14);
4072                 tw32(GRC_RX_CPU_EVENT, val);
4073
4074                 /* Wait for RX cpu to ACK the event.  */
4075                 for (i = 0; i < 100; i++) {
4076                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4077                                 break;
4078                         udelay(1);
4079                 }
4080         }
4081 }
4082
4083 /* tp->lock is held. */
4084 static int tg3_halt(struct tg3 *tp, int silent)
4085 {
4086         int err;
4087
4088         tg3_stop_fw(tp);
4089
4090         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4091
4092         tg3_abort_hw(tp, silent);
4093         err = tg3_chip_reset(tp);
4094
4095         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4096         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4097
4098         if (err)
4099                 return err;
4100
4101         return 0;
4102 }
4103
4104 #define TG3_FW_RELEASE_MAJOR    0x0
4105 #define TG3_FW_RELASE_MINOR     0x0
4106 #define TG3_FW_RELEASE_FIX      0x0
4107 #define TG3_FW_START_ADDR       0x08000000
4108 #define TG3_FW_TEXT_ADDR        0x08000000
4109 #define TG3_FW_TEXT_LEN         0x9c0
4110 #define TG3_FW_RODATA_ADDR      0x080009c0
4111 #define TG3_FW_RODATA_LEN       0x60
4112 #define TG3_FW_DATA_ADDR        0x08000a40
4113 #define TG3_FW_DATA_LEN         0x20
4114 #define TG3_FW_SBSS_ADDR        0x08000a60
4115 #define TG3_FW_SBSS_LEN         0xc
4116 #define TG3_FW_BSS_ADDR         0x08000a70
4117 #define TG3_FW_BSS_LEN          0x10
4118
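/* Firmware image for the NIC's internal MIPS CPU; the segment addresses and
 * lengths above describe how the blob is laid out when it is loaded into
 * the chip's on-board memory.
 */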
4119 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4120         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4121         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4122         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4123         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4124         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4125         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4126         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4127         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4128         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4129         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4130         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4131         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4132         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4133         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4134         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4135         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4136         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4137         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4138         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4139         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4140         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4141         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4142         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4143         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4144         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4145         0, 0, 0, 0, 0, 0,
4146         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4147         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4148         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4149         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4150         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4151         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4152         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4153         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4154         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4155         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4156         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4157         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4158         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4159         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4160         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4161         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4162         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4163         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4164         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4165         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4166         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4167         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4168         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4169         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4170         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4171         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4172         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4173         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4174         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4175         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4176         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4177         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4178         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4179         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4180         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4181         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4182         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4183         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4184         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4185         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4186         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4187         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4188         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4189         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4190         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4191         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4192         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4193         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4194         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4195         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4196         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4197         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4198         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4199         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4200         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4201         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4202         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4203         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4204         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4205         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4206         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4207         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4208         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4209         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4210         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4211 };
4212
4213 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4214         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4215         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4216         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4217         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4218         0x00000000
4219 };
4220
4221 #if 0 /* All zeros, don't eat up space with it. */
4222 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4223         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4224         0x00000000, 0x00000000, 0x00000000, 0x00000000
4225 };
4226 #endif
4227
4228 #define RX_CPU_SCRATCH_BASE     0x30000
4229 #define RX_CPU_SCRATCH_SIZE     0x04000
4230 #define TX_CPU_SCRATCH_BASE     0x34000
4231 #define TX_CPU_SCRATCH_SIZE     0x04000
4232
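/* Halt one of the on-chip MIPS CPUs: assert CPU_MODE_HALT and poll until
 * the halt bit reads back set.  Never called for the TX CPU on 5705-class
 * chips, which have no TX CPU (hence the BUG() below).
 */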
4233 /* tp->lock is held. */
4234 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4235 {
4236         int i;
4237
4238         if (offset == TX_CPU_BASE &&
4239             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4240                 BUG();
4241
4242         if (offset == RX_CPU_BASE) {
4243                 for (i = 0; i < 10000; i++) {
4244                         tw32(offset + CPU_STATE, 0xffffffff);
4245                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4246                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4247                                 break;
4248                 }
4249
4250                 tw32(offset + CPU_STATE, 0xffffffff);
4251                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4252                 udelay(10);
4253         } else {
4254                 for (i = 0; i < 10000; i++) {
4255                         tw32(offset + CPU_STATE, 0xffffffff);
4256                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4257                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4258                                 break;
4259                 }
4260         }
4261
4262         if (i >= 10000) {
4263                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4264                        "%s CPU\n",
4265                        tp->dev->name,
4266                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4267                 return -ENODEV;
4268         }
4269         return 0;
4270 }
4271
4272 struct fw_info {
4273         unsigned int text_base;
4274         unsigned int text_len;
4275         u32 *text_data;
4276         unsigned int rodata_base;
4277         unsigned int rodata_len;
4278         u32 *rodata_data;
4279         unsigned int data_base;
4280         unsigned int data_len;
4281         u32 *data_data;
4282 };
4283
4284 /* tp->lock is held. */
4285 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4286                                  int cpu_scratch_size, struct fw_info *info)
4287 {
4288         int err, i;
4289         u32 orig_tg3_flags = tp->tg3_flags;
4290         void (*write_op)(struct tg3 *, u32, u32);
4291
4292         if (cpu_base == TX_CPU_BASE &&
4293             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4294                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4295                        "TX CPU firmware on %s, which is a 5705-class chip.\n",
4296                        tp->dev->name);
4297                 return -EINVAL;
4298         }
4299
4300         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4301                 write_op = tg3_write_mem;
4302         else
4303                 write_op = tg3_write_indirect_reg32;
4304
4305         /* Force use of PCI config space for indirect register
4306          * write calls.
4307          */
4308         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4309
4310         err = tg3_halt_cpu(tp, cpu_base);
4311         if (err)
4312                 goto out;
4313
4314         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4315                 write_op(tp, cpu_scratch_base + i, 0);
4316         tw32(cpu_base + CPU_STATE, 0xffffffff);
4317         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
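        /* Copy the text, rodata and data images into the CPU scratch RAM
         * one 32-bit word at a time; a NULL section pointer simply
         * zero-fills that range.
         */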
4318         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4319                 write_op(tp, (cpu_scratch_base +
4320                               (info->text_base & 0xffff) +
4321                               (i * sizeof(u32))),
4322                          (info->text_data ?
4323                           info->text_data[i] : 0));
4324         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4325                 write_op(tp, (cpu_scratch_base +
4326                               (info->rodata_base & 0xffff) +
4327                               (i * sizeof(u32))),
4328                          (info->rodata_data ?
4329                           info->rodata_data[i] : 0));
4330         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4331                 write_op(tp, (cpu_scratch_base +
4332                               (info->data_base & 0xffff) +
4333                               (i * sizeof(u32))),
4334                          (info->data_data ?
4335                           info->data_data[i] : 0));
4336
4337         err = 0;
4338
4339 out:
4340         tp->tg3_flags = orig_tg3_flags;
4341         return err;
4342 }
4343
4344 /* tp->lock is held. */
4345 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4346 {
4347         struct fw_info info;
4348         int err, i;
4349
4350         info.text_base = TG3_FW_TEXT_ADDR;
4351         info.text_len = TG3_FW_TEXT_LEN;
4352         info.text_data = &tg3FwText[0];
4353         info.rodata_base = TG3_FW_RODATA_ADDR;
4354         info.rodata_len = TG3_FW_RODATA_LEN;
4355         info.rodata_data = &tg3FwRodata[0];
4356         info.data_base = TG3_FW_DATA_ADDR;
4357         info.data_len = TG3_FW_DATA_LEN;
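        /* The 5701 A0 firmware's data section is all zeros (see the
         * #if 0'd tg3FwData[] above), so pass NULL and let the copy
         * loop zero-fill it.
         */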
4358         info.data_data = NULL;
4359
4360         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4361                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4362                                     &info);
4363         if (err)
4364                 return err;
4365
4366         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4367                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4368                                     &info);
4369         if (err)
4370                 return err;
4371
4372         /* Now start up only the RX CPU. */
4373         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4374         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4375
4376         for (i = 0; i < 5; i++) {
4377                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4378                         break;
4379                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4380                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4381                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4382                 udelay(1000);
4383         }
4384         if (i >= 5) {
4385                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4386                        "to set RX CPU PC: is %08x, should be %08x\n",
4387                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4388                        TG3_FW_TEXT_ADDR);
4389                 return -ENODEV;
4390         }
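        /* Clearing CPU_MODE releases the RX CPU from halt so it begins
         * executing at TG3_FW_TEXT_ADDR; the TX CPU is left halted.
         */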
4391         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4392         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4393
4394         return 0;
4395 }
4396
4397 #if TG3_TSO_SUPPORT != 0
4398
4399 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4400 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4401 #define TG3_TSO_FW_RELEASE_FIX          0x0
4402 #define TG3_TSO_FW_START_ADDR           0x08000000
4403 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4404 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4405 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4406 #define TG3_TSO_FW_RODATA_LEN           0x60
4407 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4408 #define TG3_TSO_FW_DATA_LEN             0x30
4409 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4410 #define TG3_TSO_FW_SBSS_LEN             0x2c
4411 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4412 #define TG3_TSO_FW_BSS_LEN              0x894
4413
4414 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4415         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4416         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4417         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4418         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4419         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4420         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4421         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4422         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4423         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4424         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4425         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4426         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4427         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4428         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4429         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4430         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4431         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4432         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4433         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4434         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4435         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4436         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4437         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4438         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4439         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4440         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4441         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4442         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4443         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4444         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4445         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4446         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4447         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4448         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4449         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4450         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4451         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4452         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4453         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4454         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4455         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4456         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4457         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4458         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4459         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4460         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4461         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4462         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4463         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4464         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4465         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4466         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4467         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4468         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4469         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4470         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4471         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4472         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4473         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4474         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4475         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4476         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4477         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4478         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4479         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4480         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4481         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4482         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4483         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4484         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4485         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4486         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4487         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4488         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4489         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4490         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4491         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4492         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4493         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4494         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4495         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4496         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4497         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4498         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4499         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4500         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4501         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4502         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4503         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4504         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4505         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4506         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4507         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4508         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4509         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4510         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4511         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4512         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4513         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4514         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4515         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4516         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4517         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4518         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4519         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4520         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4521         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4522         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4523         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4524         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4525         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4526         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4527         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4528         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4529         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4530         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4531         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4532         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4533         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4534         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4535         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4536         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4537         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4538         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4539         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4540         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4541         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4542         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4543         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4544         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4545         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4546         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4547         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4548         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4549         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4550         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4551         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4552         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4553         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4554         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4555         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4556         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4557         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4558         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4559         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4560         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4561         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4562         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4563         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4564         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4565         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4566         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4567         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4568         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4569         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4570         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4571         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4572         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4573         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4574         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4575         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4576         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4577         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4578         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4579         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4580         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4581         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4582         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4583         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4584         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4585         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4586         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4587         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4588         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4589         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4590         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4591         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4592         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4593         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4594         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4595         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4596         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4597         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4598         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4599         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4600         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4601         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4602         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4603         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4604         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4605         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4606         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4607         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4608         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4609         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4610         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4611         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4612         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4613         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4614         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4615         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4616         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4617         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4618         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4619         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4620         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4621         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4622         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4623         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4624         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4625         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4626         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4627         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4628         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4629         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4630         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4631         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4632         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4633         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4634         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4635         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4636         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4637         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4638         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4639         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4640         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4641         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4642         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4643         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4644         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4645         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4646         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4647         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4648         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4649         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4650         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4651         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4652         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4653         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4654         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4655         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4656         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4657         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4658         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4659         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4660         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4661         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4662         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4663         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4664         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4665         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4666         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4667         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4668         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4669         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4670         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4671         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4672         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4673         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4674         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4675         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4676         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4677         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4678         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4679         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4680         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4681         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4682         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4683         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4684         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4685         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4686         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4687         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4688         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4689         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4690         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4691         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4692         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4693         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4694         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4695         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4696         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4697         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4698         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4699 };
4700
4701 static u32 tg3TsoFwRodata[] = {
4702         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4703         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4704         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4705         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4706         0x00000000,
4707 };
4708
4709 static u32 tg3TsoFwData[] = {
4710         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4711         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4712         0x00000000,
4713 };
4714
4715 /* 5705 needs a special version of the TSO firmware.  */
4716 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4717 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4718 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4719 #define TG3_TSO5_FW_START_ADDR          0x00010000
4720 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4721 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4722 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4723 #define TG3_TSO5_FW_RODATA_LEN          0x50
4724 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4725 #define TG3_TSO5_FW_DATA_LEN            0x20
4726 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4727 #define TG3_TSO5_FW_SBSS_LEN            0x28
4728 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4729 #define TG3_TSO5_FW_BSS_LEN             0x88
4730
4731 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4732         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4733         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4734         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4735         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4736         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4737         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4738         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4739         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4740         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4741         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4742         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4743         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4744         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4745         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4746         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4747         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4748         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4749         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4750         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4751         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4752         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4753         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4754         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4755         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4756         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4757         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4758         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4759         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4760         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4761         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4762         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4763         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4764         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4765         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4766         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4767         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4768         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4769         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4770         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4771         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4772         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4773         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4774         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4775         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4776         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4777         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4778         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4779         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4780         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4781         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4782         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4783         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4784         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4785         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4786         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4787         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4788         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4789         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4790         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4791         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4792         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4793         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4794         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4795         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4796         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4797         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4798         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4799         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4800         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4801         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4802         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4803         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4804         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4805         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4806         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4807         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4808         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4809         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4810         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4811         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4812         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4813         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4814         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4815         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4816         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4817         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4818         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4819         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4820         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4821         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4822         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4823         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4824         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4825         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4826         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4827         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4828         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4829         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4830         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4831         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4832         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4833         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4834         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4835         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4836         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4837         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4838         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4839         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4840         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4841         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4842         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4843         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4844         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4845         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4846         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4847         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4848         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4849         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4850         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4851         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4852         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4853         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4854         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4855         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4856         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4857         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4858         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4859         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4860         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4861         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4862         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4863         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4864         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4865         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4866         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4867         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4868         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4869         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4870         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4871         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4872         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4873         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4874         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4875         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4876         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4877         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4878         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4879         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4880         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4881         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4882         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4883         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4884         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4885         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4886         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4887         0x00000000, 0x00000000, 0x00000000,
4888 };
4889
4890 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4891         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4892         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4893         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4894         0x00000000, 0x00000000, 0x00000000,
4895 };
4896
4897 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4898         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4899         0x00000000, 0x00000000, 0x00000000,
4900 };
4901
4902 /* tp->lock is held. */
4903 static int tg3_load_tso_firmware(struct tg3 *tp)
4904 {
4905         struct fw_info info;
4906         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4907         int err, i;
4908
4909         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4910                 return 0;
4911
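        /* 5705-class chips have no usable TX CPU, so their TSO firmware
         * runs on the RX CPU out of the mbuf pool region of NIC SRAM.
         * All other chips load the firmware into TX CPU scratch memory.
         */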
4912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4913                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4914                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4915                 info.text_data = &tg3Tso5FwText[0];
4916                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4917                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4918                 info.rodata_data = &tg3Tso5FwRodata[0];
4919                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4920                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4921                 info.data_data = &tg3Tso5FwData[0];
4922                 cpu_base = RX_CPU_BASE;
4923                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4924                 cpu_scratch_size = (info.text_len +
4925                                     info.rodata_len +
4926                                     info.data_len +
4927                                     TG3_TSO5_FW_SBSS_LEN +
4928                                     TG3_TSO5_FW_BSS_LEN);
4929         } else {
4930                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4931                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4932                 info.text_data = &tg3TsoFwText[0];
4933                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4934                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4935                 info.rodata_data = &tg3TsoFwRodata[0];
4936                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4937                 info.data_len = TG3_TSO_FW_DATA_LEN;
4938                 info.data_data = &tg3TsoFwData[0];
4939                 cpu_base = TX_CPU_BASE;
4940                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4941                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4942         }
4943
4944         err = tg3_load_firmware_cpu(tp, cpu_base,
4945                                     cpu_scratch_base, cpu_scratch_size,
4946                                     &info);
4947         if (err)
4948                 return err;
4949
4950         /* Now start up the CPU. */
4951         tw32(cpu_base + CPU_STATE, 0xffffffff);
4952         tw32_f(cpu_base + CPU_PC,    info.text_base);
4953
4954         for (i = 0; i < 5; i++) {
4955                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4956                         break;
4957                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4958                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4959                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4960                 udelay(1000);
4961         }
4962         if (i >= 5) {
4963                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
4964                        "to set CPU PC: is %08x, should be %08x\n",
4965                        tp->dev->name, tr32(cpu_base + CPU_PC),
4966                        info.text_base);
4967                 return -ENODEV;
4968         }
4969         tw32(cpu_base + CPU_STATE, 0xffffffff);
4970         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4971         return 0;
4972 }
4973
4974 #endif /* TG3_TSO_SUPPORT != 0 */
4975
4976 /* tp->lock is held. */
4977 static void __tg3_set_mac_addr(struct tg3 *tp)
4978 {
4979         u32 addr_high, addr_low;
4980         int i;
4981
4982         addr_high = ((tp->dev->dev_addr[0] << 8) |
4983                      tp->dev->dev_addr[1]);
4984         addr_low = ((tp->dev->dev_addr[2] << 24) |
4985                     (tp->dev->dev_addr[3] << 16) |
4986                     (tp->dev->dev_addr[4] <<  8) |
4987                     (tp->dev->dev_addr[5] <<  0));
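        /* The chip takes a MAC address as a 2-byte high word and a 4-byte
         * low word; e.g. 00:10:18:2f:3c:4d is written as addr_high
         * 0x00000010 and addr_low 0x182f3c4d.  It is replicated into all
         * four primary slots (plus twelve extended slots on 5703/5704).
         */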
4988         for (i = 0; i < 4; i++) {
4989                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4990                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4991         }
4992
4993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4995                 for (i = 0; i < 12; i++) {
4996                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4997                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4998                 }
4999         }
5000
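        /* Seed the transmit backoff generator with the byte sum of the
         * MAC address, so different NICs on a shared segment are unlikely
         * to choose the same backoff slots after a collision.
         */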
5001         addr_high = (tp->dev->dev_addr[0] +
5002                      tp->dev->dev_addr[1] +
5003                      tp->dev->dev_addr[2] +
5004                      tp->dev->dev_addr[3] +
5005                      tp->dev->dev_addr[4] +
5006                      tp->dev->dev_addr[5]) &
5007                 TX_BACKOFF_SEED_MASK;
5008         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5009 }
5010
5011 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5012 {
5013         struct tg3 *tp = netdev_priv(dev);
5014         struct sockaddr *addr = p;
5015
5016         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5017
5018         spin_lock_irq(&tp->lock);
5019         __tg3_set_mac_addr(tp);
5020         spin_unlock_irq(&tp->lock);
5021
5022         return 0;
5023 }
5024
5025 /* tp->lock is held. */
5026 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5027                            dma_addr_t mapping, u32 maxlen_flags,
5028                            u32 nic_addr)
5029 {
5030         tg3_write_mem(tp,
5031                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5032                       ((u64) mapping >> 32));
5033         tg3_write_mem(tp,
5034                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5035                       ((u64) mapping & 0xffffffff));
5036         tg3_write_mem(tp,
5037                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5038                        maxlen_flags);
5039
5040         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5041                 tg3_write_mem(tp,
5042                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5043                               nic_addr);
5044 }
5045
5046 static void __tg3_set_rx_mode(struct net_device *);
5047
5048 /* tp->lock is held. */
5049 static int tg3_reset_hw(struct tg3 *tp)
5050 {
5051         u32 val, rdmac_mode;
5052         int i, err, limit;
5053
5054         tg3_disable_ints(tp);
5055
5056         tg3_stop_fw(tp);
5057
5058         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5059
5060         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5061                 tg3_abort_hw(tp, 1);
5062         }
5063
5064         err = tg3_chip_reset(tp);
5065         if (err)
5066                 return err;
5067
5068         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5069
5070         /* This works around an issue with Athlon chipsets on
5071          * B3 tigon3 silicon.  This bit has no effect on any
5072          * other revision.  But do not set this on PCI Express
5073          * chips.
5074          */
5075         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5076                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5077         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5078
5079         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5080             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5081                 val = tr32(TG3PCI_PCISTATE);
5082                 val |= PCISTATE_RETRY_SAME_DMA;
5083                 tw32(TG3PCI_PCISTATE, val);
5084         }
5085
5086         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5087                 /* Enable some hw fixes.  */
5088                 val = tr32(TG3PCI_MSI_DATA);
5089                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5090                 tw32(TG3PCI_MSI_DATA, val);
5091         }
5092
5093         /* Descriptor ring init may make accesses to the
5094          * NIC SRAM area to setup the TX descriptors, so we
5095          * can only do this after the hardware has been
5096          * successfully reset.
5097          */
5098         tg3_init_rings(tp);
5099
5100         /* This value is determined during the probe time DMA
5101          * engine test, tg3_test_dma.
5102          */
5103         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5104
5105         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5106                           GRC_MODE_4X_NIC_SEND_RINGS |
5107                           GRC_MODE_NO_TX_PHDR_CSUM |
5108                           GRC_MODE_NO_RX_PHDR_CSUM);
5109         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5110         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5111                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5112         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5113                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5114
5115         tw32(GRC_MODE,
5116              tp->grc_mode |
5117              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5118
5119         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
5120         val = tr32(GRC_MISC_CFG);
5121         val &= ~0xff;
5122         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5123         tw32(GRC_MISC_CFG, val);
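        /* A sketch of the arithmetic, assuming the prescaler divides the
         * core clock by (value + 1): 66 MHz / (65 + 1) = 1 MHz, i.e. a
         * 1 usec tick for the coalescing timers programmed further down.
         */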
5124
5125         /* Initialize MBUF/DESC pool. */
5126         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5127                 /* Do nothing.  */
5128         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5129                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5130                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5131                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5132                 else
5133                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5134                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5135                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5136         }
5137 #if TG3_TSO_SUPPORT != 0
5138         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5139                 int fw_len;
5140
5141                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5142                           TG3_TSO5_FW_RODATA_LEN +
5143                           TG3_TSO5_FW_DATA_LEN +
5144                           TG3_TSO5_FW_SBSS_LEN +
5145                           TG3_TSO5_FW_BSS_LEN);
5146                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
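                /* The rounding above aligns fw_len to the next 0x80
                 * (128-byte) boundary, e.g. 0x1234 becomes 0x1280; the
                 * mbuf pool base written below then starts immediately
                 * after the TSO firmware image in NIC SRAM.
                 */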
5147                 tw32(BUFMGR_MB_POOL_ADDR,
5148                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5149                 tw32(BUFMGR_MB_POOL_SIZE,
5150                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5151         }
5152 #endif
5153
5154         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5155                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5156                      tp->bufmgr_config.mbuf_read_dma_low_water);
5157                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5158                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5159                 tw32(BUFMGR_MB_HIGH_WATER,
5160                      tp->bufmgr_config.mbuf_high_water);
5161         } else {
5162                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5163                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5164                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5165                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5166                 tw32(BUFMGR_MB_HIGH_WATER,
5167                      tp->bufmgr_config.mbuf_high_water_jumbo);
5168         }
5169         tw32(BUFMGR_DMA_LOW_WATER,
5170              tp->bufmgr_config.dma_low_water);
5171         tw32(BUFMGR_DMA_HIGH_WATER,
5172              tp->bufmgr_config.dma_high_water);
5173
5174         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5175         for (i = 0; i < 2000; i++) {
5176                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5177                         break;
5178                 udelay(10);
5179         }
5180         if (i >= 2000) {
5181                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5182                        tp->dev->name);
5183                 return -ENODEV;
5184         }
5185
5186         /* Setup replenish threshold. */
5187         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5188
5189         /* Initialize TG3_BDINFO's at:
5190          *  RCVDBDI_STD_BD:     standard eth size rx ring
5191          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5192          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5193          *
5194          * like so:
5195          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5196          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5197          *                              ring attribute flags
5198          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5199          *
5200          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5201          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5202          *
5203          * The size of each ring is fixed in the firmware, but the location is
5204          * configurable.
5205          */
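        /* Illustrative encoding example: a 1536-byte standard rx buffer with
         * no attribute flags would be written to TG3_BDINFO_MAXLEN_FLAGS as
         * (1536 << 16) = 0x06000000.
         */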
5206         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5207              ((u64) tp->rx_std_mapping >> 32));
5208         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5209              ((u64) tp->rx_std_mapping & 0xffffffff));
5210         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5211              NIC_SRAM_RX_BUFFER_DESC);
5212
5213         /* Don't even try to program the JUMBO/MINI buffer descriptor
5214          * configs on 5705.
5215          */
5216         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5217                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5218                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5219         } else {
5220                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5221                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5222
5223                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5224                      BDINFO_FLAGS_DISABLED);
5225
5226                 /* Setup replenish threshold. */
5227                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5228
5229                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5231                              ((u64) tp->rx_jumbo_mapping >> 32));
5232                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5233                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5234                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5235                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5236                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5237                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5238                 } else {
5239                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5240                              BDINFO_FLAGS_DISABLED);
5241                 }
5242
5243         }
5244
5245         /* There is only one send ring on 5705/5750, no need to explicitly
5246          * disable the others.
5247          */
5248         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5249                 /* Clear out send RCB ring in SRAM. */
5250                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5251                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5252                                       BDINFO_FLAGS_DISABLED);
5253         }
5254
5255         tp->tx_prod = 0;
5256         tp->tx_cons = 0;
5257         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5258         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5259
5260         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5261                        tp->tx_desc_mapping,
5262                        (TG3_TX_RING_SIZE <<
5263                         BDINFO_FLAGS_MAXLEN_SHIFT),
5264                        NIC_SRAM_TX_BUFFER_DESC);
5265
5266         /* There is only one receive return ring on 5705/5750, no need
5267          * to explicitly disable the others.
5268          */
5269         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5270                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5271                      i += TG3_BDINFO_SIZE) {
5272                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5273                                       BDINFO_FLAGS_DISABLED);
5274                 }
5275         }
5276
5277         tp->rx_rcb_ptr = 0;
5278         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5279
5280         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5281                        tp->rx_rcb_mapping,
5282                        (TG3_RX_RCB_RING_SIZE(tp) <<
5283                         BDINFO_FLAGS_MAXLEN_SHIFT),
5284                        0);
5285
5286         tp->rx_std_ptr = tp->rx_pending;
5287         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5288                      tp->rx_std_ptr);
5289
5290         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5291                                                 tp->rx_jumbo_pending : 0;
5292         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5293                      tp->rx_jumbo_ptr);
5294
5295         /* Initialize MAC address and backoff seed. */
5296         __tg3_set_mac_addr(tp);
5297
5298         /* MTU + ethernet header + FCS + optional VLAN tag */
5299         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
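        /* The extra 8 bytes above cover the 4-byte FCS plus a 4-byte 802.1Q
         * VLAN tag; with ETH_HLEN = 14 and a standard 1500-byte MTU this
         * programs 1522.
         */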
5300
5301         /* The slot time is changed by tg3_setup_phy if we
5302          * run at gigabit with half duplex.
5303          */
5304         tw32(MAC_TX_LENGTHS,
5305              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5306              (6 << TX_LENGTHS_IPG_SHIFT) |
5307              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5308
5309         /* Receive rules. */
5310         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5311         tw32(RCVLPC_CONFIG, 0x0181);
5312
5313         /* Calculate RDMAC_MODE setting early, we need it to determine
5314          * the RCVLPC_STATE_ENABLE mask.
5315          */
5316         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5317                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5318                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5319                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5320                       RDMAC_MODE_LNGREAD_ENAB);
5321         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5322                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5323
5324         /* If statement applies to 5705 and 5750 PCI devices only */
5325         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5326              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5327             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5328                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5329                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5330                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5331                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5332                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5333                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5334                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5335                 }
5336         }
5337
5338         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5339                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5340
5341 #if TG3_TSO_SUPPORT != 0
5342         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5343                 rdmac_mode |= (1 << 27);
5344 #endif
5345
5346         /* Receive/send statistics. */
5347         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5348             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5349                 val = tr32(RCVLPC_STATS_ENABLE);
5350                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5351                 tw32(RCVLPC_STATS_ENABLE, val);
5352         } else {
5353                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5354         }
5355         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5356         tw32(SNDDATAI_STATSENAB, 0xffffff);
5357         tw32(SNDDATAI_STATSCTRL,
5358              (SNDDATAI_SCTRL_ENABLE |
5359               SNDDATAI_SCTRL_FASTUPD));
5360
5361         /* Setup host coalescing engine. */
5362         tw32(HOSTCC_MODE, 0);
5363         for (i = 0; i < 2000; i++) {
5364                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5365                         break;
5366                 udelay(10);
5367         }
5368
5369         tw32(HOSTCC_RXCOL_TICKS, 0);
5370         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5371         tw32(HOSTCC_RXMAX_FRAMES, 1);
5372         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5373         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5374                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5375                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5376         }
5377         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5378         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5379
5380         /* set status block DMA address */
5381         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5382              ((u64) tp->status_mapping >> 32));
5383         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5384              ((u64) tp->status_mapping & 0xffffffff));
5385
5386         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5387                 /* Status/statistics block address.  See tg3_timer,
5388                  * the tg3_periodic_fetch_stats call there, and
5389                  * tg3_get_stats to see how this works for 5705/5750 chips.
5390                  */
5391                 tw32(HOSTCC_STAT_COAL_TICKS,
5392                      DEFAULT_STAT_COAL_TICKS);
5393                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5394                      ((u64) tp->stats_mapping >> 32));
5395                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5396                      ((u64) tp->stats_mapping & 0xffffffff));
5397                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5398                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5399         }
5400
5401         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5402
5403         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5404         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5405         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5406                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5407
5408         /* Clear statistics/status block in chip, and status block in ram. */
5409         for (i = NIC_SRAM_STATS_BLK;
5410              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5411              i += sizeof(u32)) {
5412                 tg3_write_mem(tp, i, 0);
5413                 udelay(40);
5414         }
5415         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5416
5417         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5418                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5419         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5420         udelay(40);
5421
5422         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5423          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5424          * register to preserve the GPIO settings for LOMs. The GPIOs,
5425          * whether used as inputs or outputs, are set by boot code after
5426          * reset.
5427          */
5428         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5429                 u32 gpio_mask;
5430
5431                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5432                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5433
5434                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5435                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5436                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5437
5438                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5439
5440                 /* GPIO1 must be driven high for eeprom write protect */
5441                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5442                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5443         }
5444         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5445         udelay(100);
5446
5447         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5448         tr32(MAILBOX_INTERRUPT_0);
5449
5450         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5451                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5452                 udelay(40);
5453         }
5454
5455         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5456                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5457                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5458                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5459                WDMAC_MODE_LNGREAD_ENAB);
5460
5461         /* If statement applies to 5705 and 5750 PCI devices only */
5462         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5463              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5465                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5466                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5467                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5468                         /* nothing */
5469                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5470                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5471                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5472                         val |= WDMAC_MODE_RX_ACCEL;
5473                 }
5474         }
5475
5476         tw32_f(WDMAC_MODE, val);
5477         udelay(40);
5478
5479         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5480                 val = tr32(TG3PCI_X_CAPS);
5481                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5482                         val &= ~PCIX_CAPS_BURST_MASK;
5483                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5484                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5485                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5486                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5487                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5488                                 val |= (tp->split_mode_max_reqs <<
5489                                         PCIX_CAPS_SPLIT_SHIFT);
5490                 }
5491                 tw32(TG3PCI_X_CAPS, val);
5492         }
5493
5494         tw32_f(RDMAC_MODE, rdmac_mode);
5495         udelay(40);
5496
5497         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5498         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5499                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5500         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5501         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5502         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5503         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5504         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5505 #if TG3_TSO_SUPPORT != 0
5506         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5507                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5508 #endif
5509         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5510         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5511
5512         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5513                 err = tg3_load_5701_a0_firmware_fix(tp);
5514                 if (err)
5515                         return err;
5516         }
5517
5518 #if TG3_TSO_SUPPORT != 0
5519         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5520                 err = tg3_load_tso_firmware(tp);
5521                 if (err)
5522                         return err;
5523         }
5524 #endif
5525
5526         tp->tx_mode = TX_MODE_ENABLE;
5527         tw32_f(MAC_TX_MODE, tp->tx_mode);
5528         udelay(100);
5529
5530         tp->rx_mode = RX_MODE_ENABLE;
5531         tw32_f(MAC_RX_MODE, tp->rx_mode);
5532         udelay(10);
5533
5534         if (tp->link_config.phy_is_low_power) {
5535                 tp->link_config.phy_is_low_power = 0;
5536                 tp->link_config.speed = tp->link_config.orig_speed;
5537                 tp->link_config.duplex = tp->link_config.orig_duplex;
5538                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5539         }
5540
5541         tp->mi_mode = MAC_MI_MODE_BASE;
5542         tw32_f(MAC_MI_MODE, tp->mi_mode);
5543         udelay(80);
5544
5545         tw32(MAC_LED_CTRL, tp->led_ctrl);
5546
5547         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5548         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5549                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5550                 udelay(10);
5551         }
5552         tw32_f(MAC_RX_MODE, tp->rx_mode);
5553         udelay(10);
5554
5555         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5556                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5557                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5558                         /* Set drive transmission level to 1.2V  */
5559                         /* only if the signal pre-emphasis bit is not set  */
5560                         val = tr32(MAC_SERDES_CFG);
5561                         val &= 0xfffff000;
5562                         val |= 0x880;
5563                         tw32(MAC_SERDES_CFG, val);
5564                 }
5565                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5566                         tw32(MAC_SERDES_CFG, 0x616000);
5567         }
5568
5569         /* Prevent chip from dropping frames when flow control
5570          * is enabled.
5571          */
5572         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5573
5574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5575             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5576                 /* Use hardware link auto-negotiation */
5577                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5578         }
5579
5580         err = tg3_setup_phy(tp, 1);
5581         if (err)
5582                 return err;
5583
5584         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5585                 u32 tmp;
5586
5587                 /* Clear CRC stats. */
5588                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5589                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5590                         tg3_readphy(tp, 0x14, &tmp);
5591                 }
5592         }
5593
5594         __tg3_set_rx_mode(tp->dev);
5595
5596         /* Initialize receive rules. */
5597         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5598         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5599         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5600         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5601
5602         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5603                 limit = 8;
5604         else
5605                 limit = 16;
5606         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5607                 limit -= 4;
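        /* The cases below deliberately fall through: entering at 'limit'
         * clears every unused rule/value pair from (limit - 1) down to 4.
         * Rules 0 and 1 were programmed above; rules 2 and 3 are left
         * untouched here.
         */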
5608         switch (limit) {
5609         case 16:
5610                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5611         case 15:
5612                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5613         case 14:
5614                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5615         case 13:
5616                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5617         case 12:
5618                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5619         case 11:
5620                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5621         case 10:
5622                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5623         case 9:
5624                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5625         case 8:
5626                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5627         case 7:
5628                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5629         case 6:
5630                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5631         case 5:
5632                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5633         case 4:
5634                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5635         case 3:
5636                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5637         case 2:
5638         case 1:
5639
5640         default:
5641                 break;
5642         }
5643
5644         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5645
5646         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5647                 tg3_enable_ints(tp);
5648
5649         return 0;
5650 }
5651
5652 /* Called at device open time to get the chip ready for
5653  * packet processing.  Invoked with tp->lock held.
5654  */
5655 static int tg3_init_hw(struct tg3 *tp)
5656 {
5657         int err;
5658
5659         /* Force the chip into D0. */
5660         err = tg3_set_power_state(tp, 0);
5661         if (err)
5662                 goto out;
5663
5664         tg3_switch_clocks(tp);
5665
5666         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5667
5668         err = tg3_reset_hw(tp);
5669
5670 out:
5671         return err;
5672 }
5673
5674 #define TG3_STAT_ADD32(PSTAT, REG) \
5675 do {    u32 __val = tr32(REG); \
5676         (PSTAT)->low += __val; \
5677         if ((PSTAT)->low < __val) \
5678                 (PSTAT)->high += 1; \
5679 } while (0)
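/* Worked example of the carry handling above (illustrative values only):
 * if (PSTAT)->low is 0xfffffff0 and the 32-bit register reads 0x00000020,
 * low wraps to 0x00000010, which is smaller than the value just added, so
 * high is incremented.  The macro thereby maintains a 64-bit software
 * total on top of 32-bit hardware counters.
 */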
5680
5681 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5682 {
5683         struct tg3_hw_stats *sp = tp->hw_stats;
5684
5685         if (!netif_carrier_ok(tp->dev))
5686                 return;
5687
5688         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5689         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5690         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5691         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5692         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5693         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5694         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5695         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5696         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5697         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5698         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5699         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5700         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5701
5702         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5703         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5704         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5705         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5706         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5707         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5708         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5709         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5710         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5711         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5712         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5713         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5714         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5715         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5716 }
5717
5718 static void tg3_timer(unsigned long __opaque)
5719 {
5720         struct tg3 *tp = (struct tg3 *) __opaque;
5721         unsigned long flags;
5722
5723         spin_lock_irqsave(&tp->lock, flags);
5724         spin_lock(&tp->tx_lock);
5725
5726         /* All of this garbage is because, when using non-tagged
5727          * IRQ status, the mailbox/status_block protocol the chip
5728          * uses with the cpu is race prone.
5729          */
5730         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5731                 tw32(GRC_LOCAL_CTRL,
5732                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5733         } else {
5734                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5735                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5736         }
5737
5738         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5739                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5740                 spin_unlock(&tp->tx_lock);
5741                 spin_unlock_irqrestore(&tp->lock, flags);
5742                 schedule_work(&tp->reset_task);
5743                 return;
5744         }
5745
5746         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5747                 tg3_periodic_fetch_stats(tp);
5748
5749         /* This part only runs once per second. */
5750         if (!--tp->timer_counter) {
5751                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5752                         u32 mac_stat;
5753                         int phy_event;
5754
5755                         mac_stat = tr32(MAC_STATUS);
5756
5757                         phy_event = 0;
5758                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5759                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5760                                         phy_event = 1;
5761                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5762                                 phy_event = 1;
5763
5764                         if (phy_event)
5765                                 tg3_setup_phy(tp, 0);
5766                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5767                         u32 mac_stat = tr32(MAC_STATUS);
5768                         int need_setup = 0;
5769
5770                         if (netif_carrier_ok(tp->dev) &&
5771                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5772                                 need_setup = 1;
5773                         }
5774                         if (! netif_carrier_ok(tp->dev) &&
5775                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5776                                          MAC_STATUS_SIGNAL_DET))) {
5777                                 need_setup = 1;
5778                         }
5779                         if (need_setup) {
5780                                 tw32_f(MAC_MODE,
5781                                      (tp->mac_mode &
5782                                       ~MAC_MODE_PORT_MODE_MASK));
5783                                 udelay(40);
5784                                 tw32_f(MAC_MODE, tp->mac_mode);
5785                                 udelay(40);
5786                                 tg3_setup_phy(tp, 0);
5787                         }
5788                 }
5789
5790                 tp->timer_counter = tp->timer_multiplier;
5791         }
5792
5793         /* Heartbeat is only sent once every 120 seconds.  */
5794         if (!--tp->asf_counter) {
5795                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5796                         u32 val;
5797
5798                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5799                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5800                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5801                         val = tr32(GRC_RX_CPU_EVENT);
5802                         val |= (1 << 14);
5803                         tw32(GRC_RX_CPU_EVENT, val);
5804                 }
5805                 tp->asf_counter = tp->asf_multiplier;
5806         }
5807
5808         spin_unlock(&tp->tx_lock);
5809         spin_unlock_irqrestore(&tp->lock, flags);
5810
5811         tp->timer.expires = jiffies + tp->timer_offset;
5812         add_timer(&tp->timer);
5813 }
5814
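/* Verify that interrupt delivery works: temporarily install tg3_test_isr,
 * force a host-coalescing "now" event, and poll the interrupt mailbox for
 * up to roughly 50ms; a nonzero mailbox value appears to indicate that the
 * test ISR ran.  The regular MSI or INTx handler is reinstalled before
 * returning.
 */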
5815 static int tg3_test_interrupt(struct tg3 *tp)
5816 {
5817         struct net_device *dev = tp->dev;
5818         int err, i;
5819         u32 int_mbox = 0;
5820
5821         tg3_disable_ints(tp);
5822
5823         free_irq(tp->pdev->irq, dev);
5824
5825         err = request_irq(tp->pdev->irq, tg3_test_isr,
5826                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5827         if (err)
5828                 return err;
5829
5830         tg3_enable_ints(tp);
5831
5832         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5833                HOSTCC_MODE_NOW);
5834
5835         for (i = 0; i < 5; i++) {
5836                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5837                 if (int_mbox != 0)
5838                         break;
5839                 msleep(10);
5840         }
5841
5842         tg3_disable_ints(tp);
5843
5844         free_irq(tp->pdev->irq, dev);
5845         
5846         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5847                 err = request_irq(tp->pdev->irq, tg3_msi,
5848                                   SA_SAMPLE_RANDOM, dev->name, dev);
5849         else
5850                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5851                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5852
5853         if (err)
5854                 return err;
5855
5856         if (int_mbox != 0)
5857                 return 0;
5858
5859         return -EIO;
5860 }
5861
5862 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
5863  * mode is successfully restored.
5864  */
5865 static int tg3_test_msi(struct tg3 *tp)
5866 {
5867         struct net_device *dev = tp->dev;
5868         int err;
5869         u16 pci_cmd;
5870
5871         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5872                 return 0;
5873
5874         /* Turn off SERR reporting in case MSI terminates with Master
5875          * Abort.
5876          */
5877         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5878         pci_write_config_word(tp->pdev, PCI_COMMAND,
5879                               pci_cmd & ~PCI_COMMAND_SERR);
5880
5881         err = tg3_test_interrupt(tp);
5882
5883         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5884
5885         if (!err)
5886                 return 0;
5887
5888         /* other failures */
5889         if (err != -EIO)
5890                 return err;
5891
5892         /* MSI test failed, go back to INTx mode */
5893         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
5894                "switching to INTx mode. Please report this failure to "
5895                "the PCI maintainer and include system chipset information.\n",
5896                        tp->dev->name);
5897
5898         free_irq(tp->pdev->irq, dev);
5899         pci_disable_msi(tp->pdev);
5900
5901         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5902
5903         err = request_irq(tp->pdev->irq, tg3_interrupt,
5904                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5905
5906         if (err)
5907                 return err;
5908
5909         /* Need to reset the chip because the MSI cycle may have terminated
5910          * with Master Abort.
5911          */
5912         spin_lock_irq(&tp->lock);
5913         spin_lock(&tp->tx_lock);
5914
5915         tg3_halt(tp, 1);
5916         err = tg3_init_hw(tp);
5917
5918         spin_unlock(&tp->tx_lock);
5919         spin_unlock_irq(&tp->lock);
5920
5921         if (err)
5922                 free_irq(tp->pdev->irq, dev);
5923
5924         return err;
5925 }
5926
5927 static int tg3_open(struct net_device *dev)
5928 {
5929         struct tg3 *tp = netdev_priv(dev);
5930         int err;
5931
5932         spin_lock_irq(&tp->lock);
5933         spin_lock(&tp->tx_lock);
5934
5935         tg3_disable_ints(tp);
5936         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5937
5938         spin_unlock(&tp->tx_lock);
5939         spin_unlock_irq(&tp->lock);
5940
5941         /* The placement of this call is tied
5942          * to the setup and use of Host TX descriptors.
5943          */
5944         err = tg3_alloc_consistent(tp);
5945         if (err)
5946                 return err;
5947
5948         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5949             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
5950             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
5951                 if (pci_enable_msi(tp->pdev) == 0) {
5952                         u32 msi_mode;
5953
5954                         msi_mode = tr32(MSGINT_MODE);
5955                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
5956                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
5957                 }
5958         }
5959         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5960                 err = request_irq(tp->pdev->irq, tg3_msi,
5961                                   SA_SAMPLE_RANDOM, dev->name, dev);
5962         else
5963                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5964                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5965
5966         if (err) {
5967                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5968                         pci_disable_msi(tp->pdev);
5969                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5970                 }
5971                 tg3_free_consistent(tp);
5972                 return err;
5973         }
5974
5975         spin_lock_irq(&tp->lock);
5976         spin_lock(&tp->tx_lock);
5977
5978         err = tg3_init_hw(tp);
5979         if (err) {
5980                 tg3_halt(tp, 1);
5981                 tg3_free_rings(tp);
5982         } else {
5983                 tp->timer_offset = HZ / 10;
5984                 tp->timer_counter = tp->timer_multiplier = 10;
5985                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
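                /* timer_offset of HZ / 10 makes tg3_timer fire ten times
                 * per second, so timer_counter = 10 gives the once-per-
                 * second work in tg3_timer and asf_counter = 10 * 120
                 * gives the 120-second ASF heartbeat interval.
                 */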
5986
5987                 init_timer(&tp->timer);
5988                 tp->timer.expires = jiffies + tp->timer_offset;
5989                 tp->timer.data = (unsigned long) tp;
5990                 tp->timer.function = tg3_timer;
5991         }
5992
5993         spin_unlock(&tp->tx_lock);
5994         spin_unlock_irq(&tp->lock);
5995
5996         if (err) {
5997                 free_irq(tp->pdev->irq, dev);
5998                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5999                         pci_disable_msi(tp->pdev);
6000                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6001                 }
6002                 tg3_free_consistent(tp);
6003                 return err;
6004         }
6005
6006         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6007                 err = tg3_test_msi(tp);
6008                 if (err) {
6009                         spin_lock_irq(&tp->lock);
6010                         spin_lock(&tp->tx_lock);
6011
6012                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6013                                 pci_disable_msi(tp->pdev);
6014                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6015                         }
6016                         tg3_halt(tp, 1);
6017                         tg3_free_rings(tp);
6018                         tg3_free_consistent(tp);
6019
6020                         spin_unlock(&tp->tx_lock);
6021                         spin_unlock_irq(&tp->lock);
6022
6023                         return err;
6024                 }
6025         }
6026
6027         spin_lock_irq(&tp->lock);
6028         spin_lock(&tp->tx_lock);
6029
6030         add_timer(&tp->timer);
6031         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6032         tg3_enable_ints(tp);
6033
6034         spin_unlock(&tp->tx_lock);
6035         spin_unlock_irq(&tp->lock);
6036
6037         netif_start_queue(dev);
6038
6039         return 0;
6040 }
6041
6042 #if 0
6043 /*static*/ void tg3_dump_state(struct tg3 *tp)
6044 {
6045         u32 val32, val32_2, val32_3, val32_4, val32_5;
6046         u16 val16;
6047         int i;
6048
6049         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6050         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6051         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6052                val16, val32);
6053
6054         /* MAC block */
6055         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6056                tr32(MAC_MODE), tr32(MAC_STATUS));
6057         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6058                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6059         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6060                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6061         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6062                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6063
6064         /* Send data initiator control block */
6065         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6066                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6067         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6068                tr32(SNDDATAI_STATSCTRL));
6069
6070         /* Send data completion control block */
6071         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6072
6073         /* Send BD ring selector block */
6074         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6075                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6076
6077         /* Send BD initiator control block */
6078         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6079                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6080
6081         /* Send BD completion control block */
6082         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6083
6084         /* Receive list placement control block */
6085         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6086                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6087         printk("       RCVLPC_STATSCTRL[%08x]\n",
6088                tr32(RCVLPC_STATSCTRL));
6089
6090         /* Receive data and receive BD initiator control block */
6091         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6092                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6093
6094         /* Receive data completion control block */
6095         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6096                tr32(RCVDCC_MODE));
6097
6098         /* Receive BD initiator control block */
6099         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6100                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6101
6102         /* Receive BD completion control block */
6103         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6104                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6105
6106         /* Receive list selector control block */
6107         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6108                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6109
6110         /* Mbuf cluster free block */
6111         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6112                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6113
6114         /* Host coalescing control block */
6115         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6116                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6117         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6118                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6119                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6120         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6121                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6122                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6123         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6124                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6125         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6126                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6127
6128         /* Memory arbiter control block */
6129         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6130                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6131
6132         /* Buffer manager control block */
6133         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6134                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6135         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6136                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6137         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6138                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6139                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6140                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6141
6142         /* Read DMA control block */
6143         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6144                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6145
6146         /* Write DMA control block */
6147         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6148                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6149
6150         /* DMA completion block */
6151         printk("DEBUG: DMAC_MODE[%08x]\n",
6152                tr32(DMAC_MODE));
6153
6154         /* GRC block */
6155         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6156                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6157         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6158                tr32(GRC_LOCAL_CTRL));
6159
6160         /* TG3_BDINFOs */
6161         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6162                tr32(RCVDBDI_JUMBO_BD + 0x0),
6163                tr32(RCVDBDI_JUMBO_BD + 0x4),
6164                tr32(RCVDBDI_JUMBO_BD + 0x8),
6165                tr32(RCVDBDI_JUMBO_BD + 0xc));
6166         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6167                tr32(RCVDBDI_STD_BD + 0x0),
6168                tr32(RCVDBDI_STD_BD + 0x4),
6169                tr32(RCVDBDI_STD_BD + 0x8),
6170                tr32(RCVDBDI_STD_BD + 0xc));
6171         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6172                tr32(RCVDBDI_MINI_BD + 0x0),
6173                tr32(RCVDBDI_MINI_BD + 0x4),
6174                tr32(RCVDBDI_MINI_BD + 0x8),
6175                tr32(RCVDBDI_MINI_BD + 0xc));
6176
6177         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6178         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6179         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6180         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6181         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6182                val32, val32_2, val32_3, val32_4);
6183
6184         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6185         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6186         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6187         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6188         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6189                val32, val32_2, val32_3, val32_4);
6190
6191         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6192         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6193         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6194         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6195         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6196         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6197                val32, val32_2, val32_3, val32_4, val32_5);
6198
6199         /* SW status block */
6200         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6201                tp->hw_status->status,
6202                tp->hw_status->status_tag,
6203                tp->hw_status->rx_jumbo_consumer,
6204                tp->hw_status->rx_consumer,
6205                tp->hw_status->rx_mini_consumer,
6206                tp->hw_status->idx[0].rx_producer,
6207                tp->hw_status->idx[0].tx_consumer);
6208
6209         /* SW statistics block */
6210         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6211                ((u32 *)tp->hw_stats)[0],
6212                ((u32 *)tp->hw_stats)[1],
6213                ((u32 *)tp->hw_stats)[2],
6214                ((u32 *)tp->hw_stats)[3]);
6215
6216         /* Mailboxes */
6217         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6218                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6219                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6220                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6221                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6222
6223         /* NIC side send descriptors. */
6224         for (i = 0; i < 6; i++) {
6225                 unsigned long txd;
6226
6227                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6228                         + (i * sizeof(struct tg3_tx_buffer_desc));
6229                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6230                        i,
6231                        readl(txd + 0x0), readl(txd + 0x4),
6232                        readl(txd + 0x8), readl(txd + 0xc));
6233         }
6234
6235         /* NIC side RX descriptors. */
6236         for (i = 0; i < 6; i++) {
6237                 unsigned long rxd;
6238
6239                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6240                         + (i * sizeof(struct tg3_rx_buffer_desc));
6241                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6242                        i,
6243                        readl(rxd + 0x0), readl(rxd + 0x4),
6244                        readl(rxd + 0x8), readl(rxd + 0xc));
6245                 rxd += (4 * sizeof(u32));
6246                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6247                        i,
6248                        readl(rxd + 0x0), readl(rxd + 0x4),
6249                        readl(rxd + 0x8), readl(rxd + 0xc));
6250         }
6251
6252         for (i = 0; i < 6; i++) {
6253                 unsigned long rxd;
6254
6255                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6256                         + (i * sizeof(struct tg3_rx_buffer_desc));
6257                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6258                        i,
6259                        readl(rxd + 0x0), readl(rxd + 0x4),
6260                        readl(rxd + 0x8), readl(rxd + 0xc));
6261                 rxd += (4 * sizeof(u32));
6262                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6263                        i,
6264                        readl(rxd + 0x0), readl(rxd + 0x4),
6265                        readl(rxd + 0x8), readl(rxd + 0xc));
6266         }
6267 }
6268 #endif
6269
6270 static struct net_device_stats *tg3_get_stats(struct net_device *);
6271 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6272
6273 static int tg3_close(struct net_device *dev)
6274 {
6275         struct tg3 *tp = netdev_priv(dev);
6276
6277         netif_stop_queue(dev);
6278
6279         del_timer_sync(&tp->timer);
6280
6281         spin_lock_irq(&tp->lock);
6282         spin_lock(&tp->tx_lock);
6283 #if 0
6284         tg3_dump_state(tp);
6285 #endif
6286
6287         tg3_disable_ints(tp);
6288
6289         tg3_halt(tp, 1);
6290         tg3_free_rings(tp);
6291         tp->tg3_flags &=
6292                 ~(TG3_FLAG_INIT_COMPLETE |
6293                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6294         netif_carrier_off(tp->dev);
6295
6296         spin_unlock(&tp->tx_lock);
6297         spin_unlock_irq(&tp->lock);
6298
6299         free_irq(tp->pdev->irq, dev);
6300         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6301                 pci_disable_msi(tp->pdev);
6302                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6303         }
6304
6305         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6306                sizeof(tp->net_stats_prev));
6307         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6308                sizeof(tp->estats_prev));
6309
6310         tg3_free_consistent(tp);
6311
6312         return 0;
6313 }
6314
6315 static inline unsigned long get_stat64(tg3_stat64_t *val)
6316 {
6317         unsigned long ret;
6318
6319 #if (BITS_PER_LONG == 32)
6320         ret = val->low;
6321 #else
6322         ret = ((u64)val->high << 32) | ((u64)val->low);
6323 #endif
6324         return ret;
6325 }
6326
6327 static unsigned long calc_crc_errors(struct tg3 *tp)
6328 {
6329         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6330
6331         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6332             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6333              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6334                 unsigned long flags;
6335                 u32 val;
6336
6337                 spin_lock_irqsave(&tp->lock, flags);
6338                 if (!tg3_readphy(tp, 0x1e, &val)) {
6339                         tg3_writephy(tp, 0x1e, val | 0x8000);
6340                         tg3_readphy(tp, 0x14, &val);
6341                 } else
6342                         val = 0;
6343                 spin_unlock_irqrestore(&tp->lock, flags);
6344
6345                 tp->phy_crc_errors += val;
6346
6347                 return tp->phy_crc_errors;
6348         }
6349
6350         return get_stat64(&hw_stats->rx_fcs_errors);
6351 }
6352
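     /* Accumulate one ethtool statistic: add the live hardware counter to
      * the snapshot saved in tp->estats_prev at the last close, so the
      * totals reported to ethtool persist across a down/up cycle.
      */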
6353 #define ESTAT_ADD(member) \
6354         estats->member =        old_estats->member + \
6355                                 get_stat64(&hw_stats->member)
6356
6357 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6358 {
6359         struct tg3_ethtool_stats *estats = &tp->estats;
6360         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6361         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6362
6363         if (!hw_stats)
6364                 return old_estats;
6365
6366         ESTAT_ADD(rx_octets);
6367         ESTAT_ADD(rx_fragments);
6368         ESTAT_ADD(rx_ucast_packets);
6369         ESTAT_ADD(rx_mcast_packets);
6370         ESTAT_ADD(rx_bcast_packets);
6371         ESTAT_ADD(rx_fcs_errors);
6372         ESTAT_ADD(rx_align_errors);
6373         ESTAT_ADD(rx_xon_pause_rcvd);
6374         ESTAT_ADD(rx_xoff_pause_rcvd);
6375         ESTAT_ADD(rx_mac_ctrl_rcvd);
6376         ESTAT_ADD(rx_xoff_entered);
6377         ESTAT_ADD(rx_frame_too_long_errors);
6378         ESTAT_ADD(rx_jabbers);
6379         ESTAT_ADD(rx_undersize_packets);
6380         ESTAT_ADD(rx_in_length_errors);
6381         ESTAT_ADD(rx_out_length_errors);
6382         ESTAT_ADD(rx_64_or_less_octet_packets);
6383         ESTAT_ADD(rx_65_to_127_octet_packets);
6384         ESTAT_ADD(rx_128_to_255_octet_packets);
6385         ESTAT_ADD(rx_256_to_511_octet_packets);
6386         ESTAT_ADD(rx_512_to_1023_octet_packets);
6387         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6388         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6389         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6390         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6391         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6392
6393         ESTAT_ADD(tx_octets);
6394         ESTAT_ADD(tx_collisions);
6395         ESTAT_ADD(tx_xon_sent);
6396         ESTAT_ADD(tx_xoff_sent);
6397         ESTAT_ADD(tx_flow_control);
6398         ESTAT_ADD(tx_mac_errors);
6399         ESTAT_ADD(tx_single_collisions);
6400         ESTAT_ADD(tx_mult_collisions);
6401         ESTAT_ADD(tx_deferred);
6402         ESTAT_ADD(tx_excessive_collisions);
6403         ESTAT_ADD(tx_late_collisions);
6404         ESTAT_ADD(tx_collide_2times);
6405         ESTAT_ADD(tx_collide_3times);
6406         ESTAT_ADD(tx_collide_4times);
6407         ESTAT_ADD(tx_collide_5times);
6408         ESTAT_ADD(tx_collide_6times);
6409         ESTAT_ADD(tx_collide_7times);
6410         ESTAT_ADD(tx_collide_8times);
6411         ESTAT_ADD(tx_collide_9times);
6412         ESTAT_ADD(tx_collide_10times);
6413         ESTAT_ADD(tx_collide_11times);
6414         ESTAT_ADD(tx_collide_12times);
6415         ESTAT_ADD(tx_collide_13times);
6416         ESTAT_ADD(tx_collide_14times);
6417         ESTAT_ADD(tx_collide_15times);
6418         ESTAT_ADD(tx_ucast_packets);
6419         ESTAT_ADD(tx_mcast_packets);
6420         ESTAT_ADD(tx_bcast_packets);
6421         ESTAT_ADD(tx_carrier_sense_errors);
6422         ESTAT_ADD(tx_discards);
6423         ESTAT_ADD(tx_errors);
6424
6425         ESTAT_ADD(dma_writeq_full);
6426         ESTAT_ADD(dma_write_prioq_full);
6427         ESTAT_ADD(rxbds_empty);
6428         ESTAT_ADD(rx_discards);
6429         ESTAT_ADD(rx_errors);
6430         ESTAT_ADD(rx_threshold_hit);
6431
6432         ESTAT_ADD(dma_readq_full);
6433         ESTAT_ADD(dma_read_prioq_full);
6434         ESTAT_ADD(tx_comp_queue_full);
6435
6436         ESTAT_ADD(ring_set_send_prod_index);
6437         ESTAT_ADD(ring_status_update);
6438         ESTAT_ADD(nic_irqs);
6439         ESTAT_ADD(nic_avoided_irqs);
6440         ESTAT_ADD(nic_tx_threshold_hit);
6441
6442         return estats;
6443 }
6444
6445 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6446 {
6447         struct tg3 *tp = netdev_priv(dev);
6448         struct net_device_stats *stats = &tp->net_stats;
6449         struct net_device_stats *old_stats = &tp->net_stats_prev;
6450         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6451
6452         if (!hw_stats)
6453                 return old_stats;
6454
6455         stats->rx_packets = old_stats->rx_packets +
6456                 get_stat64(&hw_stats->rx_ucast_packets) +
6457                 get_stat64(&hw_stats->rx_mcast_packets) +
6458                 get_stat64(&hw_stats->rx_bcast_packets);
6459                 
6460         stats->tx_packets = old_stats->tx_packets +
6461                 get_stat64(&hw_stats->tx_ucast_packets) +
6462                 get_stat64(&hw_stats->tx_mcast_packets) +
6463                 get_stat64(&hw_stats->tx_bcast_packets);
6464
6465         stats->rx_bytes = old_stats->rx_bytes +
6466                 get_stat64(&hw_stats->rx_octets);
6467         stats->tx_bytes = old_stats->tx_bytes +
6468                 get_stat64(&hw_stats->tx_octets);
6469
6470         stats->rx_errors = old_stats->rx_errors +
6471                 get_stat64(&hw_stats->rx_errors) +
6472                 get_stat64(&hw_stats->rx_discards);
6473         stats->tx_errors = old_stats->tx_errors +
6474                 get_stat64(&hw_stats->tx_errors) +
6475                 get_stat64(&hw_stats->tx_mac_errors) +
6476                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6477                 get_stat64(&hw_stats->tx_discards);
6478
6479         stats->multicast = old_stats->multicast +
6480                 get_stat64(&hw_stats->rx_mcast_packets);
6481         stats->collisions = old_stats->collisions +
6482                 get_stat64(&hw_stats->tx_collisions);
6483
6484         stats->rx_length_errors = old_stats->rx_length_errors +
6485                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6486                 get_stat64(&hw_stats->rx_undersize_packets);
6487
6488         stats->rx_over_errors = old_stats->rx_over_errors +
6489                 get_stat64(&hw_stats->rxbds_empty);
6490         stats->rx_frame_errors = old_stats->rx_frame_errors +
6491                 get_stat64(&hw_stats->rx_align_errors);
6492         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6493                 get_stat64(&hw_stats->tx_discards);
6494         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6495                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6496
6497         stats->rx_crc_errors = old_stats->rx_crc_errors +
6498                 calc_crc_errors(tp);
6499
6500         return stats;
6501 }
6502
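     /* Bit-reflected CRC-32 (polynomial 0xedb88320) computed one byte at a
      * time over the given buffer.  Used below to hash multicast addresses
      * into the MAC_HASH_REG_* filter registers.
      */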
6503 static inline u32 calc_crc(unsigned char *buf, int len)
6504 {
6505         u32 reg;
6506         u32 tmp;
6507         int j, k;
6508
6509         reg = 0xffffffff;
6510
6511         for (j = 0; j < len; j++) {
6512                 reg ^= buf[j];
6513
6514                 for (k = 0; k < 8; k++) {
6515                         tmp = reg & 0x01;
6516
6517                         reg >>= 1;
6518
6519                         if (tmp) {
6520                                 reg ^= 0xedb88320;
6521                         }
6522                 }
6523         }
6524
6525         return ~reg;
6526 }
6527
6528 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6529 {
6530         /* accept or reject all multicast frames */
6531         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6532         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6533         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6534         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6535 }
6536
6537 static void __tg3_set_rx_mode(struct net_device *dev)
6538 {
6539         struct tg3 *tp = netdev_priv(dev);
6540         u32 rx_mode;
6541
6542         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6543                                   RX_MODE_KEEP_VLAN_TAG);
6544
6545         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6546          * flag clear.
6547          */
6548 #if TG3_VLAN_TAG_USED
6549         if (!tp->vlgrp &&
6550             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6551                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6552 #else
6553         /* By definition, VLAN is always disabled in this
6554          * case.
6555          */
6556         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6557                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6558 #endif
6559
6560         if (dev->flags & IFF_PROMISC) {
6561                 /* Promiscuous mode. */
6562                 rx_mode |= RX_MODE_PROMISC;
6563         } else if (dev->flags & IFF_ALLMULTI) {
6564                 /* Accept all multicast. */
6565                 tg3_set_multi (tp, 1);
6566         } else if (dev->mc_count < 1) {
6567                 /* Reject all multicast. */
6568                 tg3_set_multi (tp, 0);
6569         } else {
6570                 /* Accept one or more multicast(s). */
6571                 struct dev_mc_list *mclist;
6572                 unsigned int i;
6573                 u32 mc_filter[4] = { 0, };
6574                 u32 regidx;
6575                 u32 bit;
6576                 u32 crc;
6577
6578                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6579                      i++, mclist = mclist->next) {
6580
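                             /* Hash on the inverted low 7 bits of the CRC:
                              * bits 6:5 select one of the four 32-bit hash
                              * registers, bits 4:0 the bit within it.
                              */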
6581                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6582                         bit = ~crc & 0x7f;
6583                         regidx = (bit & 0x60) >> 5;
6584                         bit &= 0x1f;
6585                         mc_filter[regidx] |= (1 << bit);
6586                 }
6587
6588                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6589                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6590                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6591                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6592         }
6593
6594         if (rx_mode != tp->rx_mode) {
6595                 tp->rx_mode = rx_mode;
6596                 tw32_f(MAC_RX_MODE, rx_mode);
6597                 udelay(10);
6598         }
6599 }
6600
6601 static void tg3_set_rx_mode(struct net_device *dev)
6602 {
6603         struct tg3 *tp = netdev_priv(dev);
6604
6605         spin_lock_irq(&tp->lock);
6606         spin_lock(&tp->tx_lock);
6607         __tg3_set_rx_mode(dev);
6608         spin_unlock(&tp->tx_lock);
6609         spin_unlock_irq(&tp->lock);
6610 }
6611
6612 #define TG3_REGDUMP_LEN         (32 * 1024)
6613
6614 static int tg3_get_regs_len(struct net_device *dev)
6615 {
6616         return TG3_REGDUMP_LEN;
6617 }
6618
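     /* Copy selected register ranges into the ethtool dump buffer.  Each
      * GET_REG32_LOOP/GET_REG32_1 below stores its values at the registers'
      * native offsets within the buffer; ranges that are not read stay
      * zeroed by the initial memset, so the dump mirrors the register map.
      */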
6619 static void tg3_get_regs(struct net_device *dev,
6620                 struct ethtool_regs *regs, void *_p)
6621 {
6622         u32 *p = _p;
6623         struct tg3 *tp = netdev_priv(dev);
6624         u8 *orig_p = _p;
6625         int i;
6626
6627         regs->version = 0;
6628
6629         memset(p, 0, TG3_REGDUMP_LEN);
6630
6631         spin_lock_irq(&tp->lock);
6632         spin_lock(&tp->tx_lock);
6633
6634 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6635 #define GET_REG32_LOOP(base,len)                \
6636 do {    p = (u32 *)(orig_p + (base));           \
6637         for (i = 0; i < len; i += 4)            \
6638                 __GET_REG32((base) + i);        \
6639 } while (0)
6640 #define GET_REG32_1(reg)                        \
6641 do {    p = (u32 *)(orig_p + (reg));            \
6642         __GET_REG32((reg));                     \
6643 } while (0)
6644
6645         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6646         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6647         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6648         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6649         GET_REG32_1(SNDDATAC_MODE);
6650         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6651         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6652         GET_REG32_1(SNDBDC_MODE);
6653         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6654         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6655         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6656         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6657         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6658         GET_REG32_1(RCVDCC_MODE);
6659         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6660         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6661         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6662         GET_REG32_1(MBFREE_MODE);
6663         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6664         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6665         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6666         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6667         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6668         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6669         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6670         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6671         GET_REG32_LOOP(FTQ_RESET, 0x120);
6672         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6673         GET_REG32_1(DMAC_MODE);
6674         GET_REG32_LOOP(GRC_MODE, 0x4c);
6675         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6676                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6677
6678 #undef __GET_REG32
6679 #undef GET_REG32_LOOP
6680 #undef GET_REG32_1
6681
6682         spin_unlock(&tp->tx_lock);
6683         spin_unlock_irq(&tp->lock);
6684 }
6685
6686 static int tg3_get_eeprom_len(struct net_device *dev)
6687 {
6688         struct tg3 *tp = netdev_priv(dev);
6689
6690         return tp->nvram_size;
6691 }
6692
6693 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6694
6695 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6696 {
6697         struct tg3 *tp = netdev_priv(dev);
6698         int ret;
6699         u8  *pd;
6700         u32 i, offset, len, val, b_offset, b_count;
6701
6702         offset = eeprom->offset;
6703         len = eeprom->len;
6704         eeprom->len = 0;
6705
6706         eeprom->magic = TG3_EEPROM_MAGIC;
6707
6708         if (offset & 3) {
6709                 /* adjustments to start on required 4 byte boundary */
6710                 b_offset = offset & 3;
6711                 b_count = 4 - b_offset;
6712                 if (b_count > len) {
6713                         /* i.e. offset=1 len=2 */
6714                         b_count = len;
6715                 }
6716                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6717                 if (ret)
6718                         return ret;
6719                 val = cpu_to_le32(val);
6720                 memcpy(data, ((char*)&val) + b_offset, b_count);
6721                 len -= b_count;
6722                 offset += b_count;
6723                 eeprom->len += b_count;
6724         }
6725
6726         /* read bytes up to the last 4-byte boundary */
6727         pd = &data[eeprom->len];
6728         for (i = 0; i < (len - (len & 3)); i += 4) {
6729                 ret = tg3_nvram_read(tp, offset + i, &val);
6730                 if (ret) {
6731                         eeprom->len += i;
6732                         return ret;
6733                 }
6734                 val = cpu_to_le32(val);
6735                 memcpy(pd + i, &val, 4);
6736         }
6737         eeprom->len += i;
6738
6739         if (len & 3) {
6740                 /* read last bytes not ending on 4 byte boundary */
6741                 pd = &data[eeprom->len];
6742                 b_count = len & 3;
6743                 b_offset = offset + len - b_count;
6744                 ret = tg3_nvram_read(tp, b_offset, &val);
6745                 if (ret)
6746                         return ret;
6747                 val = cpu_to_le32(val);
6748                 memcpy(pd, ((char*)&val), b_count);
6749                 eeprom->len += b_count;
6750         }
6751         return 0;
6752 }
6753
6754 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6755
6756 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6757 {
6758         struct tg3 *tp = netdev_priv(dev);
6759         int ret;
6760         u32 offset, len, b_offset, odd_len, start, end;
6761         u8 *buf;
6762
6763         if (eeprom->magic != TG3_EEPROM_MAGIC)
6764                 return -EINVAL;
6765
6766         offset = eeprom->offset;
6767         len = eeprom->len;
6768
6769         if ((b_offset = (offset & 3))) {
6770                 /* adjustments to start on required 4 byte boundary */
6771                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6772                 if (ret)
6773                         return ret;
6774                 start = cpu_to_le32(start);
6775                 len += b_offset;
6776                 offset &= ~3;
6777                 if (len < 4)
6778                         len = 4;
6779         }
6780
6781         odd_len = 0;
6782         if (len & 3) {
6783                 /* adjustments to end on required 4 byte boundary */
6784                 odd_len = 1;
6785                 len = (len + 3) & ~3;
6786                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6787                 if (ret)
6788                         return ret;
6789                 end = cpu_to_le32(end);
6790         }
6791
6792         buf = data;
6793         if (b_offset || odd_len) {
6794                 buf = kmalloc(len, GFP_KERNEL);
6795                 if (!buf)
6796                         return -ENOMEM;
6797                 if (b_offset)
6798                         memcpy(buf, &start, 4);
6799                 if (odd_len)
6800                         memcpy(buf+len-4, &end, 4);
6801                 memcpy(buf + b_offset, data, eeprom->len);
6802         }
6803
6804         ret = tg3_nvram_write_block(tp, offset, len, buf);
6805
6806         if (buf != data)
6807                 kfree(buf);
6808
6809         return ret;
6810 }
6811
6812 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6813 {
6814         struct tg3 *tp = netdev_priv(dev);
6815   
6816         cmd->supported = (SUPPORTED_Autoneg);
6817
6818         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6819                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6820                                    SUPPORTED_1000baseT_Full);
6821
6822         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6823                 cmd->supported |= (SUPPORTED_100baseT_Half |
6824                                    SUPPORTED_100baseT_Full |
6825                                    SUPPORTED_10baseT_Half |
6826                                    SUPPORTED_10baseT_Full |
6827                                    SUPPORTED_MII);
6828         else
6829                 cmd->supported |= SUPPORTED_FIBRE;
6830   
6831         cmd->advertising = tp->link_config.advertising;
6832         if (netif_running(dev)) {
6833                 cmd->speed = tp->link_config.active_speed;
6834                 cmd->duplex = tp->link_config.active_duplex;
6835         }
6836         cmd->port = 0;
6837         cmd->phy_address = PHY_ADDR;
6838         cmd->transceiver = 0;
6839         cmd->autoneg = tp->link_config.autoneg;
6840         cmd->maxtxpkt = 0;
6841         cmd->maxrxpkt = 0;
6842         return 0;
6843 }
6844   
6845 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6846 {
6847         struct tg3 *tp = netdev_priv(dev);
6848   
6849         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6850                 /* These are the only valid advertisement bits allowed.  */
6851                 if (cmd->autoneg == AUTONEG_ENABLE &&
6852                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6853                                           ADVERTISED_1000baseT_Full |
6854                                           ADVERTISED_Autoneg |
6855                                           ADVERTISED_FIBRE)))
6856                         return -EINVAL;
6857         }
6858
6859         spin_lock_irq(&tp->lock);
6860         spin_lock(&tp->tx_lock);
6861
6862         tp->link_config.autoneg = cmd->autoneg;
6863         if (cmd->autoneg == AUTONEG_ENABLE) {
6864                 tp->link_config.advertising = cmd->advertising;
6865                 tp->link_config.speed = SPEED_INVALID;
6866                 tp->link_config.duplex = DUPLEX_INVALID;
6867         } else {
6868                 tp->link_config.advertising = 0;
6869                 tp->link_config.speed = cmd->speed;
6870                 tp->link_config.duplex = cmd->duplex;
6871         }
6872   
6873         if (netif_running(dev))
6874                 tg3_setup_phy(tp, 1);
6875
6876         spin_unlock(&tp->tx_lock);
6877         spin_unlock_irq(&tp->lock);
6878   
6879         return 0;
6880 }
6881   
6882 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6883 {
6884         struct tg3 *tp = netdev_priv(dev);
6885   
6886         strcpy(info->driver, DRV_MODULE_NAME);
6887         strcpy(info->version, DRV_MODULE_VERSION);
6888         strcpy(info->bus_info, pci_name(tp->pdev));
6889 }
6890   
6891 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6892 {
6893         struct tg3 *tp = netdev_priv(dev);
6894   
6895         wol->supported = WAKE_MAGIC;
6896         wol->wolopts = 0;
6897         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6898                 wol->wolopts = WAKE_MAGIC;
6899         memset(&wol->sopass, 0, sizeof(wol->sopass));
6900 }
6901   
6902 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6903 {
6904         struct tg3 *tp = netdev_priv(dev);
6905   
6906         if (wol->wolopts & ~WAKE_MAGIC)
6907                 return -EINVAL;
6908         if ((wol->wolopts & WAKE_MAGIC) &&
6909             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6910             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6911                 return -EINVAL;
6912   
6913         spin_lock_irq(&tp->lock);
6914         if (wol->wolopts & WAKE_MAGIC)
6915                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6916         else
6917                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6918         spin_unlock_irq(&tp->lock);
6919   
6920         return 0;
6921 }
6922   
6923 static u32 tg3_get_msglevel(struct net_device *dev)
6924 {
6925         struct tg3 *tp = netdev_priv(dev);
6926         return tp->msg_enable;
6927 }
6928   
6929 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6930 {
6931         struct tg3 *tp = netdev_priv(dev);
6932         tp->msg_enable = value;
6933 }
6934   
6935 #if TG3_TSO_SUPPORT != 0
6936 static int tg3_set_tso(struct net_device *dev, u32 value)
6937 {
6938         struct tg3 *tp = netdev_priv(dev);
6939
6940         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6941                 if (value)
6942                         return -EINVAL;
6943                 return 0;
6944         }
6945         return ethtool_op_set_tso(dev, value);
6946 }
6947 #endif
6948   
6949 static int tg3_nway_reset(struct net_device *dev)
6950 {
6951         struct tg3 *tp = netdev_priv(dev);
6952         u32 bmcr;
6953         int r;
6954   
6955         if (!netif_running(dev))
6956                 return -EAGAIN;
6957
6958         spin_lock_irq(&tp->lock);
6959         r = -EINVAL;
6960         tg3_readphy(tp, MII_BMCR, &bmcr);
6961         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6962             (bmcr & BMCR_ANENABLE)) {
6963                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6964                 r = 0;
6965         }
6966         spin_unlock_irq(&tp->lock);
6967   
6968         return r;
6969 }
6970   
6971 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6972 {
6973         struct tg3 *tp = netdev_priv(dev);
6974   
6975         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6976         ering->rx_mini_max_pending = 0;
6977         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6978
6979         ering->rx_pending = tp->rx_pending;
6980         ering->rx_mini_pending = 0;
6981         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6982         ering->tx_pending = tp->tx_pending;
6983 }
6984   
6985 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6986 {
6987         struct tg3 *tp = netdev_priv(dev);
6988   
6989         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6990             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6991             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6992                 return -EINVAL;
6993   
6994         if (netif_running(dev))
6995                 tg3_netif_stop(tp);
6996
6997         spin_lock_irq(&tp->lock);
6998         spin_lock(&tp->tx_lock);
6999   
7000         tp->rx_pending = ering->rx_pending;
7001
7002         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7003             tp->rx_pending > 63)
7004                 tp->rx_pending = 63;
7005         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7006         tp->tx_pending = ering->tx_pending;
7007
7008         if (netif_running(dev)) {
7009                 tg3_halt(tp, 1);
7010                 tg3_init_hw(tp);
7011                 tg3_netif_start(tp);
7012         }
7013
7014         spin_unlock(&tp->tx_lock);
7015         spin_unlock_irq(&tp->lock);
7016   
7017         return 0;
7018 }
7019   
7020 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7021 {
7022         struct tg3 *tp = netdev_priv(dev);
7023   
7024         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7025         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7026         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7027 }
7028   
7029 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7030 {
7031         struct tg3 *tp = netdev_priv(dev);
7032   
7033         if (netif_running(dev))
7034                 tg3_netif_stop(tp);
7035
7036         spin_lock_irq(&tp->lock);
7037         spin_lock(&tp->tx_lock);
7038         if (epause->autoneg)
7039                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7040         else
7041                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7042         if (epause->rx_pause)
7043                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7044         else
7045                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7046         if (epause->tx_pause)
7047                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7048         else
7049                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7050
7051         if (netif_running(dev)) {
7052                 tg3_halt(tp, 1);
7053                 tg3_init_hw(tp);
7054                 tg3_netif_start(tp);
7055         }
7056         spin_unlock(&tp->tx_lock);
7057         spin_unlock_irq(&tp->lock);
7058   
7059         return 0;
7060 }
7061   
7062 static u32 tg3_get_rx_csum(struct net_device *dev)
7063 {
7064         struct tg3 *tp = netdev_priv(dev);
7065         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7066 }
7067   
7068 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7069 {
7070         struct tg3 *tp = netdev_priv(dev);
7071   
7072         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7073                 if (data != 0)
7074                         return -EINVAL;
7075                 return 0;
7076         }
7077   
7078         spin_lock_irq(&tp->lock);
7079         if (data)
7080                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7081         else
7082                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7083         spin_unlock_irq(&tp->lock);
7084   
7085         return 0;
7086 }
7087   
7088 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7089 {
7090         struct tg3 *tp = netdev_priv(dev);
7091   
7092         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7093                 if (data != 0)
7094                         return -EINVAL;
7095                 return 0;
7096         }
7097   
7098         if (data)
7099                 dev->features |= NETIF_F_IP_CSUM;
7100         else
7101                 dev->features &= ~NETIF_F_IP_CSUM;
7102
7103         return 0;
7104 }
7105
7106 static int tg3_get_stats_count (struct net_device *dev)
7107 {
7108         return TG3_NUM_STATS;
7109 }
7110
7111 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7112 {
7113         switch (stringset) {
7114         case ETH_SS_STATS:
7115                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7116                 break;
7117         default:
7118                 WARN_ON(1);     /* we need a WARN() */
7119                 break;
7120         }
7121 }
7122
7123 static void tg3_get_ethtool_stats (struct net_device *dev,
7124                                    struct ethtool_stats *estats, u64 *tmp_stats)
7125 {
7126         struct tg3 *tp = netdev_priv(dev);
7127         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7128 }
7129
7130 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7131 {
7132         struct mii_ioctl_data *data = if_mii(ifr);
7133         struct tg3 *tp = netdev_priv(dev);
7134         int err;
7135
7136         switch(cmd) {
7137         case SIOCGMIIPHY:
7138                 data->phy_id = PHY_ADDR;
7139
7140                 /* fallthru */
7141         case SIOCGMIIREG: {
7142                 u32 mii_regval;
7143
7144                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7145                         break;                  /* We have no PHY */
7146
7147                 spin_lock_irq(&tp->lock);
7148                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7149                 spin_unlock_irq(&tp->lock);
7150
7151                 data->val_out = mii_regval;
7152
7153                 return err;
7154         }
7155
7156         case SIOCSMIIREG:
7157                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7158                         break;                  /* We have no PHY */
7159
7160                 if (!capable(CAP_NET_ADMIN))
7161                         return -EPERM;
7162
7163                 spin_lock_irq(&tp->lock);
7164                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7165                 spin_unlock_irq(&tp->lock);
7166
7167                 return err;
7168
7169         default:
7170                 /* do nothing */
7171                 break;
7172         }
7173         return -EOPNOTSUPP;
7174 }
7175
7176 #if TG3_VLAN_TAG_USED
7177 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7178 {
7179         struct tg3 *tp = netdev_priv(dev);
7180
7181         spin_lock_irq(&tp->lock);
7182         spin_lock(&tp->tx_lock);
7183
7184         tp->vlgrp = grp;
7185
7186         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7187         __tg3_set_rx_mode(dev);
7188
7189         spin_unlock(&tp->tx_lock);
7190         spin_unlock_irq(&tp->lock);
7191 }
7192
7193 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7194 {
7195         struct tg3 *tp = netdev_priv(dev);
7196
7197         spin_lock_irq(&tp->lock);
7198         spin_lock(&tp->tx_lock);
7199         if (tp->vlgrp)
7200                 tp->vlgrp->vlan_devices[vid] = NULL;
7201         spin_unlock(&tp->tx_lock);
7202         spin_unlock_irq(&tp->lock);
7203 }
7204 #endif
7205
7206 static struct ethtool_ops tg3_ethtool_ops = {
7207         .get_settings           = tg3_get_settings,
7208         .set_settings           = tg3_set_settings,
7209         .get_drvinfo            = tg3_get_drvinfo,
7210         .get_regs_len           = tg3_get_regs_len,
7211         .get_regs               = tg3_get_regs,
7212         .get_wol                = tg3_get_wol,
7213         .set_wol                = tg3_set_wol,
7214         .get_msglevel           = tg3_get_msglevel,
7215         .set_msglevel           = tg3_set_msglevel,
7216         .nway_reset             = tg3_nway_reset,
7217         .get_link               = ethtool_op_get_link,
7218         .get_eeprom_len         = tg3_get_eeprom_len,
7219         .get_eeprom             = tg3_get_eeprom,
7220         .set_eeprom             = tg3_set_eeprom,
7221         .get_ringparam          = tg3_get_ringparam,
7222         .set_ringparam          = tg3_set_ringparam,
7223         .get_pauseparam         = tg3_get_pauseparam,
7224         .set_pauseparam         = tg3_set_pauseparam,
7225         .get_rx_csum            = tg3_get_rx_csum,
7226         .set_rx_csum            = tg3_set_rx_csum,
7227         .get_tx_csum            = ethtool_op_get_tx_csum,
7228         .set_tx_csum            = tg3_set_tx_csum,
7229         .get_sg                 = ethtool_op_get_sg,
7230         .set_sg                 = ethtool_op_set_sg,
7231 #if TG3_TSO_SUPPORT != 0
7232         .get_tso                = ethtool_op_get_tso,
7233         .set_tso                = tg3_set_tso,
7234 #endif
7235         .get_strings            = tg3_get_strings,
7236         .get_stats_count        = tg3_get_stats_count,
7237         .get_ethtool_stats      = tg3_get_ethtool_stats,
7238 };
7239
7240 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7241 {
7242         u32 cursize, val;
7243
7244         tp->nvram_size = EEPROM_CHIP_SIZE;
7245
7246         if (tg3_nvram_read(tp, 0, &val) != 0)
7247                 return;
7248
7249         if (swab32(val) != TG3_EEPROM_MAGIC)
7250                 return;
7251
7252         /*
7253          * Size the chip by reading offsets at increasing powers of two.
7254          * When we encounter our validation signature, we know the addressing
7255          * has wrapped around, and thus have our chip size.
7256          */
7257         cursize = 0x800;
7258
7259         while (cursize < tp->nvram_size) {
7260                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7261                         return;
7262
7263                 if (swab32(val) == TG3_EEPROM_MAGIC)
7264                         break;
7265
7266                 cursize <<= 1;
7267         }
7268
7269         tp->nvram_size = cursize;
7270 }
7271                 
7272 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7273 {
7274         u32 val;
7275
7276         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7277                 if (val != 0) {
7278                         tp->nvram_size = (val >> 16) * 1024;
7279                         return;
7280                 }
7281         }
7282         tp->nvram_size = 0x20000;
7283 }
7284
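     /* Decode NVRAM_CFG1 to identify the attached flash/EEPROM part and
      * record its JEDEC vendor, page size and whether it is buffered.
      */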
7285 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7286 {
7287         u32 nvcfg1;
7288
7289         nvcfg1 = tr32(NVRAM_CFG1);
7290         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7291                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7292         }
7293         else {
7294                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7295                 tw32(NVRAM_CFG1, nvcfg1);
7296         }
7297
7298         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7299                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7300                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7301                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7302                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7303                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7304                                 break;
7305                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7306                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7307                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7308                                 break;
7309                         case FLASH_VENDOR_ATMEL_EEPROM:
7310                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7311                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7312                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7313                                 break;
7314                         case FLASH_VENDOR_ST:
7315                                 tp->nvram_jedecnum = JEDEC_ST;
7316                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7317                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7318                                 break;
7319                         case FLASH_VENDOR_SAIFUN:
7320                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7321                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7322                                 break;
7323                         case FLASH_VENDOR_SST_SMALL:
7324                         case FLASH_VENDOR_SST_LARGE:
7325                                 tp->nvram_jedecnum = JEDEC_SST;
7326                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7327                                 break;
7328                 }
7329         }
7330         else {
7331                 tp->nvram_jedecnum = JEDEC_ATMEL;
7332                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7333                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7334         }
7335 }
7336
7337 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7338 {
7339         u32 nvcfg1;
7340
7341         nvcfg1 = tr32(NVRAM_CFG1);
7342
7343         /* NVRAM protection for TPM */
7344         if (nvcfg1 & (1 << 27))
7345                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7346
7347         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7348                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7349                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7350                         tp->nvram_jedecnum = JEDEC_ATMEL;
7351                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7352                         break;
7353                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7354                         tp->nvram_jedecnum = JEDEC_ATMEL;
7355                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7356                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7357                         break;
7358                 case FLASH_5752VENDOR_ST_M45PE10:
7359                 case FLASH_5752VENDOR_ST_M45PE20:
7360                 case FLASH_5752VENDOR_ST_M45PE40:
7361                         tp->nvram_jedecnum = JEDEC_ST;
7362                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7363                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7364                         break;
7365         }
7366
7367         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7368                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7369                         case FLASH_5752PAGE_SIZE_256:
7370                                 tp->nvram_pagesize = 256;
7371                                 break;
7372                         case FLASH_5752PAGE_SIZE_512:
7373                                 tp->nvram_pagesize = 512;
7374                                 break;
7375                         case FLASH_5752PAGE_SIZE_1K:
7376                                 tp->nvram_pagesize = 1024;
7377                                 break;
7378                         case FLASH_5752PAGE_SIZE_2K:
7379                                 tp->nvram_pagesize = 2048;
7380                                 break;
7381                         case FLASH_5752PAGE_SIZE_4K:
7382                                 tp->nvram_pagesize = 4096;
7383                                 break;
7384                         case FLASH_5752PAGE_SIZE_264:
7385                                 tp->nvram_pagesize = 264;
7386                                 break;
7387                 }
7388         }
7389         else {
7390                 /* For eeprom, set pagesize to maximum eeprom size */
7391                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7392
7393                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7394                 tw32(NVRAM_CFG1, nvcfg1);
7395         }
7396 }
7397
7398 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7399 static void __devinit tg3_nvram_init(struct tg3 *tp)
7400 {
7401         int j;
7402
7403         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7404                 return;
7405
7406         tw32_f(GRC_EEPROM_ADDR,
7407              (EEPROM_ADDR_FSM_RESET |
7408               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7409                EEPROM_ADDR_CLKPERD_SHIFT)));
7410
7411         /* XXX schedule_timeout() ... */
7412         for (j = 0; j < 100; j++)
7413                 udelay(10);
7414
7415         /* Enable seeprom accesses. */
7416         tw32_f(GRC_LOCAL_CTRL,
7417              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7418         udelay(100);
7419
7420         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7421             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7422                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7423
7424                 tg3_enable_nvram_access(tp);
7425
7426                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7427                         tg3_get_5752_nvram_info(tp);
7428                 else
7429                         tg3_get_nvram_info(tp);
7430
7431                 tg3_get_nvram_size(tp);
7432
7433                 tg3_disable_nvram_access(tp);
7434
7435         } else {
7436                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7437
7438                 tg3_get_eeprom_size(tp);
7439         }
7440 }
7441
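     /* Read one 32-bit word via the legacy serial-EEPROM interface:
      * program GRC_EEPROM_ADDR, poll for EEPROM_ADDR_COMPLETE (up to
      * 10000 x 100 usec), then fetch the result from GRC_EEPROM_DATA.
      */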
7442 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7443                                         u32 offset, u32 *val)
7444 {
7445         u32 tmp;
7446         int i;
7447
7448         if (offset > EEPROM_ADDR_ADDR_MASK ||
7449             (offset % 4) != 0)
7450                 return -EINVAL;
7451
7452         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7453                                         EEPROM_ADDR_DEVID_MASK |
7454                                         EEPROM_ADDR_READ);
7455         tw32(GRC_EEPROM_ADDR,
7456              tmp |
7457              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7458              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7459               EEPROM_ADDR_ADDR_MASK) |
7460              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7461
7462         for (i = 0; i < 10000; i++) {
7463                 tmp = tr32(GRC_EEPROM_ADDR);
7464
7465                 if (tmp & EEPROM_ADDR_COMPLETE)
7466                         break;
7467                 udelay(100);
7468         }
7469         if (!(tmp & EEPROM_ADDR_COMPLETE))
7470                 return -EBUSY;
7471
7472         *val = tr32(GRC_EEPROM_DATA);
7473         return 0;
7474 }
7475
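     /* Each poll of NVRAM_CMD in tg3_nvram_exec_cmd() follows a 10 usec
      * delay, so this iteration limit bounds the wait for NVRAM_CMD_DONE
      * at roughly 100 ms.
      */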
7476 #define NVRAM_CMD_TIMEOUT 10000
7477
7478 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7479 {
7480         int i;
7481
7482         tw32(NVRAM_CMD, nvram_cmd);
7483         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7484                 udelay(10);
7485                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7486                         udelay(10);
7487                         break;
7488                 }
7489         }
7490         if (i == NVRAM_CMD_TIMEOUT) {
7491                 return -EBUSY;
7492         }
7493         return 0;
7494 }
7495
7496 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7497 {
7498         int ret;
7499
7500         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7501                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7502                 return -EINVAL;
7503         }
7504
7505         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7506                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7507
7508         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7509                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7510                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7511
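                     /* Buffered Atmel flash (AT45DB family) is addressed by
                      * page: shift the page index into the page-address
                      * field and keep the byte offset within the page.
                      */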
7512                 offset = ((offset / tp->nvram_pagesize) <<
7513                           ATMEL_AT45DB0X1B_PAGE_POS) +
7514                         (offset % tp->nvram_pagesize);
7515         }
7516
7517         if (offset > NVRAM_ADDR_MSK)
7518                 return -EINVAL;
7519
7520         tg3_nvram_lock(tp);
7521
7522         tg3_enable_nvram_access(tp);
7523
7524         tw32(NVRAM_ADDR, offset);
7525         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7526                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7527
7528         if (ret == 0)
7529                 *val = swab32(tr32(NVRAM_RDDATA));
7530
7531         tg3_nvram_unlock(tp);
7532
7533         tg3_disable_nvram_access(tp);
7534
7535         return ret;
7536 }
7537
7538 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7539                                     u32 offset, u32 len, u8 *buf)
7540 {
7541         int i, j, rc = 0;
7542         u32 val;
7543
7544         for (i = 0; i < len; i += 4) {
7545                 u32 addr, data;
7546
7547                 addr = offset + i;
7548
7549                 memcpy(&data, buf + i, 4);
7550
7551                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7552
7553                 val = tr32(GRC_EEPROM_ADDR);
7554                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7555
7556                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7557                         EEPROM_ADDR_READ);
7558                 tw32(GRC_EEPROM_ADDR, val |
7559                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7560                         (addr & EEPROM_ADDR_ADDR_MASK) |
7561                         EEPROM_ADDR_START |
7562                         EEPROM_ADDR_WRITE);
7563                 
7564                 for (j = 0; j < 10000; j++) {
7565                         val = tr32(GRC_EEPROM_ADDR);
7566
7567                         if (val & EEPROM_ADDR_COMPLETE)
7568                                 break;
7569                         udelay(100);
7570                 }
7571                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7572                         rc = -EBUSY;
7573                         break;
7574                 }
7575         }
7576
7577         return rc;
7578 }
7579
7580 /* offset and length are dword aligned */
7581 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7582                 u8 *buf)
7583 {
7584         int ret = 0;
7585         u32 pagesize = tp->nvram_pagesize;
7586         u32 pagemask = pagesize - 1;
7587         u32 nvram_cmd;
7588         u8 *tmp;
7589
7590         tmp = kmalloc(pagesize, GFP_KERNEL);
7591         if (tmp == NULL)
7592                 return -ENOMEM;
7593
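             /* Unbuffered flash must be erased a page at a time before it
              * can be rewritten, so do a read-modify-write: read the whole
              * page into tmp, merge the caller's data, erase the page, then
              * program it back one word at a time.
              */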
7594         while (len) {
7595                 int j;
7596                 u32 phy_addr, page_off, size;
7597
7598                 phy_addr = offset & ~pagemask;
7599         
7600                 for (j = 0; j < pagesize; j += 4) {
7601                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7602                                                 (u32 *) (tmp + j))))
7603                                 break;
7604                 }
7605                 if (ret)
7606                         break;
7607
7608                 page_off = offset & pagemask;
7609                 size = pagesize;
7610                 if (len < size)
7611                         size = len;
7612
7613                 len -= size;
7614
7615                 memcpy(tmp + page_off, buf, size);
7616
7617                 offset = offset + (pagesize - page_off);
7618
7619                 tg3_enable_nvram_access(tp);
7620
7621                 /*
7622                  * Before we can erase the flash page, we need
7623                  * to issue a special "write enable" command.
7624                  */
7625                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7626
7627                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7628                         break;
7629
7630                 /* Erase the target page */
7631                 tw32(NVRAM_ADDR, phy_addr);
7632
7633                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7634                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7635
7636                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7637                         break;
7638
7639                 /* Issue another write enable to start the write. */
7640                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7641
7642                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7643                         break;
7644
7645                 for (j = 0; j < pagesize; j += 4) {
7646                         u32 data;
7647
7648                         data = *((u32 *) (tmp + j));
7649                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7650
7651                         tw32(NVRAM_ADDR, phy_addr + j);
7652
7653                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7654                                 NVRAM_CMD_WR;
7655
7656                         if (j == 0)
7657                                 nvram_cmd |= NVRAM_CMD_FIRST;
7658                         else if (j == (pagesize - 4))
7659                                 nvram_cmd |= NVRAM_CMD_LAST;
7660
7661                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7662                                 break;
7663                 }
7664                 if (ret)
7665                         break;
7666         }
7667
7668         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7669         tg3_nvram_exec_cmd(tp, nvram_cmd);
7670
7671         kfree(tmp);
7672
7673         return ret;
7674 }
7675
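     /* Buffered flash and EEPROM are programmed in place, one 32-bit word
      * at a time: NVRAM_CMD_FIRST/LAST bracket each flash page (EEPROMs get
      * FIRST|LAST on every word), and ST parts are sent a write-enable
      * command at the start of each page.
      */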
7676 /* offset and length are dword aligned */
7677 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7678                 u8 *buf)
7679 {
7680         int i, ret = 0;
7681
7682         for (i = 0; i < len; i += 4, offset += 4) {
7683                 u32 data, page_off, phy_addr, nvram_cmd;
7684
7685                 memcpy(&data, buf + i, 4);
7686                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7687
7688                 page_off = offset % tp->nvram_pagesize;
7689
7690                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7691                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7692
7693                         phy_addr = ((offset / tp->nvram_pagesize) <<
7694                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7695                 }
7696                 else {
7697                         phy_addr = offset;
7698                 }
7699
7700                 tw32(NVRAM_ADDR, phy_addr);
7701
7702                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7703
7704                 if ((page_off == 0) || (i == 0))
7705                         nvram_cmd |= NVRAM_CMD_FIRST;
7706                 else if (page_off == (tp->nvram_pagesize - 4))
7707                         nvram_cmd |= NVRAM_CMD_LAST;
7708
7709                 if (i == (len - 4))
7710                         nvram_cmd |= NVRAM_CMD_LAST;
7711
7712                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7713                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7714
7715                         if ((ret = tg3_nvram_exec_cmd(tp,
7716                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7717                                 NVRAM_CMD_DONE)))
7718
7719                                 break;
7720                 }
7721                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7722                         /* We always do complete word writes to eeprom. */
7723                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7724                 }
7725
7726                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7727                         break;
7728         }
7729         return ret;
7730 }
7731
7732 /* offset and length are dword aligned */
7733 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7734 {
7735         int ret;
7736
7737         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7738                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7739                 return -EINVAL;
7740         }
7741
7742         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7743                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7744                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7745                 udelay(40);
7746         }
7747
7748         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7749                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7750         }
7751         else {
7752                 u32 grc_mode;
7753
7754                 tg3_nvram_lock(tp);
7755
7756                 tg3_enable_nvram_access(tp);
7757                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7758                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
7759                         tw32(NVRAM_WRITE1, 0x406);
7760
7761                 grc_mode = tr32(GRC_MODE);
7762                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7763
7764                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7765                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7766
7767                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7768                                 buf);
7769                 }
7770                 else {
7771                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7772                                 buf);
7773                 }
7774
7775                 grc_mode = tr32(GRC_MODE);
7776                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7777
7778                 tg3_disable_nvram_access(tp);
7779                 tg3_nvram_unlock(tp);
7780         }
7781
7782         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7783                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7784                 udelay(40);
7785         }
7786
7787         return ret;
7788 }
7789
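     /* Map a board's PCI subsystem vendor/device IDs to the PHY it carries;
      * lookup_by_subsys() below searches this table for the current device.
      */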
7790 struct subsys_tbl_ent {
7791         u16 subsys_vendor, subsys_devid;
7792         u32 phy_id;
7793 };
7794
7795 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7796         /* Broadcom boards. */
7797         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7798         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7799         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7800         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7801         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7802         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7803         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7804         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7805         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7806         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7807         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7808
7809         /* 3com boards. */
7810         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7811         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7812         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7813         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7814         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7815
7816         /* DELL boards. */
7817         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7818         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7819         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7820         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7821
7822         /* Compaq boards. */
7823         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7824         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7825         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7826         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7827         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7828
7829         /* IBM boards. */
7830         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7831 };
7832
7833 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7834 {
7835         int i;
7836
7837         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7838                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7839                      tp->pdev->subsystem_vendor) &&
7840                     (subsys_id_to_phy_id[i].subsys_devid ==
7841                      tp->pdev->subsystem_device))
7842                         return &subsys_id_to_phy_id[i];
7843         }
7844         return NULL;
7845 }
7846
7847 /* Since this function may be called in D3-hot power state during
7848  * tg3_init_one(), only config cycles are allowed.
7849  */
7850 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7851 {
7852         u32 val;
7853
7854         /* Make sure register accesses (indirect or otherwise)
7855          * will function correctly.
7856          */
7857         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7858                                tp->misc_host_ctrl);
7859
7860         tp->phy_id = PHY_ID_INVALID;
7861         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7862
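             /* A valid signature here means the bootcode has populated the
              * NIC SRAM configuration area, so the PHY id, LED mode and
              * WoL/ASF bits read below can be trusted.
              */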
7863         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7864         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7865                 u32 nic_cfg, led_cfg;
7866                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7867                 int eeprom_phy_serdes = 0;
7868
7869                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7870                 tp->nic_sram_data_cfg = nic_cfg;
7871
7872                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7873                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7874                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7875                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7876                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7877                     (ver > 0) && (ver < 0x100))
7878                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7879
7880                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7881                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7882                         eeprom_phy_serdes = 1;
7883
7884                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7885                 if (nic_phy_id != 0) {
7886                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7887                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7888
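                             /* Fold the SRAM PHY id word into the driver's
                              * internal PHY_ID layout so it can be compared
                              * against the PHY_ID_* constants from tg3.h.
                              */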
7889                         eeprom_phy_id  = (id1 >> 16) << 10;
7890                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7891                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7892                 } else
7893                         eeprom_phy_id = 0;
7894
7895                 tp->phy_id = eeprom_phy_id;
7896                 if (eeprom_phy_serdes)
7897                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7898
7899                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7900                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7901                                     SHASTA_EXT_LED_MODE_MASK);
7902                 else
7903                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7904
7905                 switch (led_cfg) {
7906                 default:
7907                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7908                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7909                         break;
7910
7911                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7912                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7913                         break;
7914
7915                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7916                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7917                         break;
7918
7919                 case SHASTA_EXT_LED_SHARED:
7920                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7921                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7922                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7923                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7924                                                  LED_CTRL_MODE_PHY_2);
7925                         break;
7926
7927                 case SHASTA_EXT_LED_MAC:
7928                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7929                         break;
7930
7931                 case SHASTA_EXT_LED_COMBO:
7932                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7933                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7934                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7935                                                  LED_CTRL_MODE_PHY_2);
7936                         break;
7937
7938                 };
7939
7940                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7941                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7942                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7943                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7944
7945                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7946                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7947                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7948                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7949
7950                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7951                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7952                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7953                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7954                 }
7955                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7956                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7957
7958                 if (cfg2 & (1 << 17))
7959                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7960
7961                 /* SerDes signal pre-emphasis in register 0x590 is set
7962                  * by the bootcode if bit 18 is set. */
7963                 if (cfg2 & (1 << 18))
7964                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7965         }
7966 }
7967
7968 static int __devinit tg3_phy_probe(struct tg3 *tp)
7969 {
7970         u32 hw_phy_id_1, hw_phy_id_2;
7971         u32 hw_phy_id, hw_phy_id_masked;
7972         int err;
7973
7974         /* Reading the PHY ID register can conflict with ASF
7975          * firmware access to the PHY hardware.
7976          */
7977         err = 0;
7978         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7979                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7980         } else {
7981                 /* Now read the physical PHY_ID from the chip and verify
7982                  * that it is sane.  If it doesn't look good, we fall back
7983                  * to the PHY_ID found in the eeprom area and, failing
7984                  * that, to the hard-coded subsystem-ID table.
7985                  */
7986                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7987                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7988
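                     /* Fold MII_PHYSID1/MII_PHYSID2 into the driver's internal
                      * PHY_ID layout (the same packing used for the eeprom id
                      * in tg3_get_eeprom_hw_cfg()) before masking with
                      * PHY_ID_MASK.
                      */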
7989                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7990                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7991                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7992
7993                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7994         }
7995
7996         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7997                 tp->phy_id = hw_phy_id;
7998                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7999                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8000         } else {
8001                 if (tp->phy_id != PHY_ID_INVALID) {
8002                         /* Do nothing, phy ID already set up in
8003                          * tg3_get_eeprom_hw_cfg().
8004                          */
8005                 } else {
8006                         struct subsys_tbl_ent *p;
8007
8008                         /* No eeprom signature?  Try the hardcoded
8009                          * subsys device table.
8010                          */
8011                         p = lookup_by_subsys(tp);
8012                         if (!p)
8013                                 return -ENODEV;
8014
8015                         tp->phy_id = p->phy_id;
8016                         if (!tp->phy_id ||
8017                             tp->phy_id == PHY_ID_BCM8002)
8018                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8019                 }
8020         }
8021
8022         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8023             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8024                 u32 bmsr, adv_reg, tg3_ctrl;
8025
8026                 tg3_readphy(tp, MII_BMSR, &bmsr);
8027                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8028                     (bmsr & BMSR_LSTATUS))
8029                         goto skip_phy_reset;
8030
8031                 err = tg3_phy_reset(tp);
8032                 if (err)
8033                         return err;
8034
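                     /* Advertise every 10/100 mode plus CSMA and pause; the
                      * gigabit bits are added below unless the board is
                      * 10/100 only.
                      */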
8035                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8036                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8037                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8038                 tg3_ctrl = 0;
8039                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8040                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8041                                     MII_TG3_CTRL_ADV_1000_FULL);
8042                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8043                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8044                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8045                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8046                 }
8047
8048                 if (!tg3_copper_is_advertising_all(tp)) {
8049                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8050
8051                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8052                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8053
8054                         tg3_writephy(tp, MII_BMCR,
8055                                      BMCR_ANENABLE | BMCR_ANRESTART);
8056                 }
8057                 tg3_phy_set_wirespeed(tp);
8058
8059                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8060                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8061                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8062         }
8063
8064 skip_phy_reset:
8065         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8066                 err = tg3_init_5401phy_dsp(tp);
8067                 if (err)
8068                         return err;
8069         }
8070
8071         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8072                 err = tg3_init_5401phy_dsp(tp);
8073         }
8074
8075         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8076                 tp->link_config.advertising =
8077                         (ADVERTISED_1000baseT_Half |
8078                          ADVERTISED_1000baseT_Full |
8079                          ADVERTISED_Autoneg |
8080                          ADVERTISED_FIBRE);
8081         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8082                 tp->link_config.advertising &=
8083                         ~(ADVERTISED_1000baseT_Half |
8084                           ADVERTISED_1000baseT_Full);
8085
8086         return err;
8087 }
8088
8089 static void __devinit tg3_read_partno(struct tg3 *tp)
8090 {
8091         unsigned char vpd_data[256];
8092         int i;
8093
8094         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8095                 /* Sun decided not to put the necessary bits in the
8096                  * NVRAM of their onboard tg3 parts :(
8097                  */
8098                 strcpy(tp->board_part_number, "Sun 570X");
8099                 return;
8100         }
8101
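             /* The PCI VPD image starts at offset 0x100 in NVRAM; pull in
              * the first 256 bytes one 32-bit word at a time before
              * parsing it.
              */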
8102         for (i = 0; i < 256; i += 4) {
8103                 u32 tmp;
8104
8105                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8106                         goto out_not_found;
8107
8108                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8109                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8110                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8111                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8112         }
8113
8114         /* Now parse and find the part number. */
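             /* VPD resource tags: 0x82 is the identifier string and 0x91 the
              * read-write section, both of which are skipped; 0x90 is the
              * read-only section that carries the "PN" part-number keyword.
              * Each tag is followed by a 16-bit little-endian length.
              */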
8115         for (i = 0; i < 256; ) {
8116                 unsigned char val = vpd_data[i];
8117                 int block_end;
8118
8119                 if (val == 0x82 || val == 0x91) {
8120                         i = (i + 3 +
8121                              (vpd_data[i + 1] +
8122                               (vpd_data[i + 2] << 8)));
8123                         continue;
8124                 }
8125
8126                 if (val != 0x90)
8127                         goto out_not_found;
8128
8129                 block_end = (i + 3 +
8130                              (vpd_data[i + 1] +
8131                               (vpd_data[i + 2] << 8)));
8132                 i += 3;
8133                 while (i < block_end) {
8134                         if (vpd_data[i + 0] == 'P' &&
8135                             vpd_data[i + 1] == 'N') {
8136                                 int partno_len = vpd_data[i + 2];
8137
8138                                 if (partno_len > 24)
8139                                         goto out_not_found;
8140
8141                                 memcpy(tp->board_part_number,
8142                                        &vpd_data[i + 3],
8143                                        partno_len);
8144
8145                                 /* Success. */
8146                                 return;
8147                         }
                             /* Not "PN": skip this keyword (2-byte key,
                              * 1-byte length, then the data) so the scan
                              * always advances.
                              */
                             i += 3 + vpd_data[i + 2];
8148                 }
8149
8150                 /* Part number not found. */
8151                 goto out_not_found;
8152         }
8153
8154 out_not_found:
8155         strcpy(tp->board_part_number, "none");
8156 }
8157
8158 #ifdef CONFIG_SPARC64
8159 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8160 {
8161         struct pci_dev *pdev = tp->pdev;
8162         struct pcidev_cookie *pcp = pdev->sysdata;
8163
8164         if (pcp != NULL) {
8165                 int node = pcp->prom_node;
8166                 u32 venid;
8167                 int err;
8168
8169                 err = prom_getproperty(node, "subsystem-vendor-id",
8170                                        (char *) &venid, sizeof(venid));
8171                 if (err == 0 || err == -1)
8172                         return 0;
8173                 if (venid == PCI_VENDOR_ID_SUN)
8174                         return 1;
8175         }
8176         return 0;
8177 }
8178 #endif
8179
8180 static int __devinit tg3_get_invariants(struct tg3 *tp)
8181 {
8182         static struct pci_device_id write_reorder_chipsets[] = {
8183                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8184                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8185                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8186                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8187                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8188                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8189                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8190                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8191                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8192                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8193                 { },
8194         };
8195         u32 misc_ctrl_reg;
8196         u32 cacheline_sz_reg;
8197         u32 pci_state_reg, grc_misc_cfg;
8198         u32 val;
8199         u16 pci_cmd;
8200         int err;
8201
8202 #ifdef CONFIG_SPARC64
8203         if (tg3_is_sun_570X(tp))
8204                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8205 #endif
8206
8207         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8208          * reordering to the mailbox registers done by the host
8209          * controller can cause major troubles.  We read back from
8210          * every mailbox register write to force the writes to be
8211          * posted to the chip in order.
8212          */
8213         if (pci_dev_present(write_reorder_chipsets))
8214                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8215
8216         /* Force memory write invalidate off.  If we leave it on,
8217          * then on 5700_BX chips we have to enable a workaround.
8218          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8219          * to match the cacheline size.  The Broadcom driver has this
8220          * workaround but turns MWI off at all times, so it never uses
8221          * it.  This seems to suggest that the workaround is insufficient.
8222          */
8223         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8224         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8225         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8226
8227         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8228          * has the register indirect write enable bit set before
8229          * we try to access any of the MMIO registers.  It is also
8230          * critical that the PCI-X hw workaround situation is decided
8231          * before that point.
8232          */
8233         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8234                               &misc_ctrl_reg);
8235
8236         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8237                                MISC_HOST_CTRL_CHIPREV_SHIFT);
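             /* This revision id feeds the GET_ASIC_REV() and GET_CHIP_REV()
              * checks used throughout the rest of the driver.
              */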
8238
8239         /* Wrong chip ID in 5752 A0. This code can be removed later
8240          * as A0 is not in production.
8241          */
8242         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8243                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8244
8245         /* Initialize misc host control in PCI block. */
8246         tp->misc_host_ctrl |= (misc_ctrl_reg &
8247                                MISC_HOST_CTRL_CHIPREV);
8248         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8249                                tp->misc_host_ctrl);
8250
8251         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8252                               &cacheline_sz_reg);
8253
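             /* This dword mirrors the standard PCI config layout: cache line
              * size, latency timer, header type and BIST, one byte each.
              */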
8254         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8255         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8256         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8257         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8258
8259         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8261                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8262
8263         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8264             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8265                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8266
8267         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8268                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8269
8270         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8271                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8272
8273         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8274             tp->pci_lat_timer < 64) {
8275                 tp->pci_lat_timer = 64;
8276
8277                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8278                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8279                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8280                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8281
8282                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8283                                        cacheline_sz_reg);
8284         }
8285
8286         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8287                               &pci_state_reg);
8288
8289         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8290                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8291
8292                 /* If this is a 5700 BX chipset, and we are in PCI-X
8293                  * mode, enable register write workaround.
8294                  *
8295                  * The workaround is to use indirect register accesses
8296                  * for all chip writes except those to the mailbox registers.
8297                  */
8298                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8299                         u32 pm_reg;
8300                         u16 pci_cmd;
8301
8302                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8303
8304                         /* The chip can have its power management PCI config
8305                          * space registers clobbered due to this bug.
8306                          * So explicitly force the chip into D0 here.
8307                          */
8308                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8309                                               &pm_reg);
8310                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8311                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8312                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8313                                                pm_reg);
8314
8315                         /* Also, force SERR#/PERR# in PCI command. */
8316                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8317                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8318                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8319                 }
8320         }
8321
8322         /* Back-to-back register writes can cause problems on this chip;
8323          * the workaround is to read back all reg writes except those to
8324          * mailbox regs.  See tg3_write_indirect_reg32().
8325          *
8326          * PCI Express 5750_A0 rev chips need this workaround too.
8327          */
8328         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8329             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8330              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8331                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8332
8333         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8334                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8335         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8336                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8337
8338         /* Chip-specific fixup from Broadcom driver */
8339         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8340             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8341                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8342                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8343         }
8344
8345         /* Get eeprom hw config before calling tg3_set_power_state().
8346          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8347          * determined before calling tg3_set_power_state() so that
8348          * we know whether or not to switch out of Vaux power.
8349          * When the flag is set, it means that GPIO1 is used for eeprom
8350          * write protect and also implies that it is a LOM where GPIOs
8351          * are not used to switch power.
8352          */ 
8353         tg3_get_eeprom_hw_cfg(tp);
8354
8355         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8356          * GPIO1 driven high will bring 5700's external PHY out of reset.
8357          * It is also used as eeprom write protect on LOMs.
8358          */
8359         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8360         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8361             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8362                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8363                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8364         /* Unused GPIO3 must be driven as output on 5752 because there
8365          * are no pull-up resistors on unused GPIO pins.
8366          */
8367         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8368                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8369
8370         /* Force the chip into D0. */
8371         err = tg3_set_power_state(tp, 0);
8372         if (err) {
8373                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8374                        pci_name(tp->pdev));
8375                 return err;
8376         }
8377
8378         /* 5700 B0 chips do not support checksumming correctly due
8379          * to hardware bugs.
8380          */
8381         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8382                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8383
8384         /* Pseudo-header checksum is done by hardware logic and not
8385          * the offload processors, so make the chip do the pseudo-
8386          * header checksums on receive.  For transmit it is more
8387          * convenient to do the pseudo-header checksum in software
8388          * as Linux does that on transmit for us in all cases.
8389          */
8390         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8391         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8392
8393         /* Derive initial jumbo mode from MTU assigned in
8394          * ether_setup() via the alloc_etherdev() call
8395          */
8396         if (tp->dev->mtu > ETH_DATA_LEN)
8397                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8398
8399         /* Determine WakeOnLan speed to use. */
8400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8401             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8402             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8403             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8404                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8405         } else {
8406                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8407         }
8408
8409         /* A few boards don't want the Ethernet@WireSpeed phy feature */
8410         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8411             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8412              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8413              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8414                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8415
8416         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8417             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8418                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8419         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8420                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8421
8422         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8423                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8424
8425         /* Only 5701 and later support tagged irq status mode.
8426          * Also, 5788 chips cannot use tagged irq status.
8427          *
8428          * However, since we are using NAPI, avoid tagged irq status
8429          * because the interrupt condition is more difficult to
8430          * fully clear in that mode.
8431          */
8432         tp->coalesce_mode = 0;
8433
8434         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8435             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8436                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8437
8438         /* Initialize MAC MI mode, polling disabled. */
8439         tw32_f(MAC_MI_MODE, tp->mi_mode);
8440         udelay(80);
8441
8442         /* Initialize data/descriptor byte/word swapping. */
8443         val = tr32(GRC_MODE);
8444         val &= GRC_MODE_HOST_STACKUP;
8445         tw32(GRC_MODE, val | tp->grc_mode);
8446
8447         tg3_switch_clocks(tp);
8448
8449         /* Clear this out for sanity. */
8450         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8451
8452         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8453                               &pci_state_reg);
8454         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8455             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8456                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8457
8458                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8459                     chiprevid == CHIPREV_ID_5701_B0 ||
8460                     chiprevid == CHIPREV_ID_5701_B2 ||
8461                     chiprevid == CHIPREV_ID_5701_B5) {
8462                         void __iomem *sram_base;
8463
8464                         /* Write some dummy words into the SRAM status block
8465                          * area, see if it reads back correctly.  If the return
8466                          * value is bad, force enable the PCIX workaround.
8467                          */
8468                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8469
8470                         writel(0x00000000, sram_base);
8471                         writel(0x00000000, sram_base + 4);
8472                         writel(0xffffffff, sram_base + 4);
8473                         if (readl(sram_base) != 0x00000000)
8474                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8475                 }
8476         }
8477
8478         udelay(50);
8479         tg3_nvram_init(tp);
8480
8481         grc_misc_cfg = tr32(GRC_MISC_CFG);
8482         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8483
8484         /* Broadcom's driver says that CIOBE multisplit has a bug */
8485 #if 0
8486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8487             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8488                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8489                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8490         }
8491 #endif
8492         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8493             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8494              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8495                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8496
8497         /* these are limited to 10/100 only */
8498         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8499              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8500             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8501              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8502              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8503               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8504               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8505             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8506              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8507               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8508                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8509
8510         err = tg3_phy_probe(tp);
8511         if (err) {
8512                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8513                        pci_name(tp->pdev), err);
8514                 /* ... but do not return immediately ... */
8515         }
8516
8517         tg3_read_partno(tp);
8518
8519         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8520                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8521         } else {
8522                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8523                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8524                 else
8525                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8526         }
8527
8528         /* 5700 {AX,BX} chips have a broken status block link
8529          * change bit implementation, so we must use the
8530          * status register in those cases.
8531          */
8532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8533                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8534         else
8535                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8536
8537         /* The led_ctrl is set during tg3_phy_probe; here we might
8538          * have to force the link status polling mechanism based
8539          * upon subsystem IDs.
8540          */
8541         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8542             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8543                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8544                                   TG3_FLAG_USE_LINKCHG_REG);
8545         }
8546
8547         /* For all SERDES we poll the MAC status register. */
8548         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8549                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8550         else
8551                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8552
8553         /* 5700 BX chips need to have their TX producer index mailboxes
8554          * written twice to workaround a bug.
8555          * written twice to work around a bug.
8556         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8557                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8558         else
8559                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8560
8561         /* It seems all chips can get confused if TX buffers
8562          * straddle the 4GB address boundary in some cases.
8563          */
8564         tp->dev->hard_start_xmit = tg3_start_xmit;
8565
8566         tp->rx_offset = 2;
8567         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8568             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8569                 tp->rx_offset = 0;
8570
8571         /* By default, disable wake-on-lan.  User can change this
8572          * using ETHTOOL_SWOL.
8573          */
8574         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8575
8576         return err;
8577 }
8578
8579 #ifdef CONFIG_SPARC64
8580 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8581 {
8582         struct net_device *dev = tp->dev;
8583         struct pci_dev *pdev = tp->pdev;
8584         struct pcidev_cookie *pcp = pdev->sysdata;
8585
8586         if (pcp != NULL) {
8587                 int node = pcp->prom_node;
8588
8589                 if (prom_getproplen(node, "local-mac-address") == 6) {
8590                         prom_getproperty(node, "local-mac-address",
8591                                          dev->dev_addr, 6);
8592                         return 0;
8593                 }
8594         }
8595         return -ENODEV;
8596 }
8597
8598 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8599 {
8600         struct net_device *dev = tp->dev;
8601
8602         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8603         return 0;
8604 }
8605 #endif
8606
8607 static int __devinit tg3_get_device_address(struct tg3 *tp)
8608 {
8609         struct net_device *dev = tp->dev;
8610         u32 hi, lo, mac_offset;
8611
8612 #ifdef CONFIG_SPARC64
8613         if (!tg3_get_macaddr_sparc(tp))
8614                 return 0;
8615 #endif
8616
8617         mac_offset = 0x7c;
8618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8619             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8620                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8621                         mac_offset = 0xcc;
8622                 if (tg3_nvram_lock(tp))
8623                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8624                 else
8625                         tg3_nvram_unlock(tp);
8626         }
8627
8628         /* First try to get it from MAC address mailbox. */
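             /* The upper 16 bits read back as 0x484b (ASCII "HK") when a
              * valid MAC address has been deposited in the mailbox,
              * typically by the bootcode.
              */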
8629         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8630         if ((hi >> 16) == 0x484b) {
8631                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8632                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8633
8634                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8635                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8636                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8637                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8638                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8639         }
8640         /* Next, try NVRAM. */
8641         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8642                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8643                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8644                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8645                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8646                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8647                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8648                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8649                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8650         }
8651         /* Finally just fetch it out of the MAC control regs. */
8652         else {
8653                 hi = tr32(MAC_ADDR_0_HIGH);
8654                 lo = tr32(MAC_ADDR_0_LOW);
8655
8656                 dev->dev_addr[5] = lo & 0xff;
8657                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8658                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8659                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8660                 dev->dev_addr[1] = hi & 0xff;
8661                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8662         }
8663
8664         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8665 #ifdef CONFIG_SPARC64
8666                 if (!tg3_get_default_macaddr_sparc(tp))
8667                         return 0;
8668 #endif
8669                 return -EINVAL;
8670         }
8671         return 0;
8672 }
8673
8674 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8675 {
8676         struct tg3_internal_buffer_desc test_desc;
8677         u32 sram_dma_descs;
8678         int i, ret;
8679
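             /* Strategy: build one internal buffer descriptor, copy it into
              * NIC SRAM through the PCI memory window, kick the read or
              * write DMA queue at it, then poll the completion FIFO until
              * the descriptor's SRAM address shows up.
              */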
8680         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8681
8682         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8683         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8684         tw32(RDMAC_STATUS, 0);
8685         tw32(WDMAC_STATUS, 0);
8686
8687         tw32(BUFMGR_MODE, 0);
8688         tw32(FTQ_RESET, 0);
8689
8690         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8691         test_desc.addr_lo = buf_dma & 0xffffffff;
8692         test_desc.nic_mbuf = 0x00002100;
8693         test_desc.len = size;
8694
8695         /*
8696          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8697          * the *second* time the tg3 driver was getting loaded after an
8698          * initial scan.
8699          *
8700          * Broadcom tells me:
8701          *   ...the DMA engine is connected to the GRC block and a DMA
8702          *   reset may affect the GRC block in some unpredictable way...
8703          *   The behavior of resets to individual blocks has not been tested.
8704          *
8705          * Broadcom noted the GRC reset will also reset all sub-components.
8706          */
8707         if (to_device) {
8708                 test_desc.cqid_sqid = (13 << 8) | 2;
8709
8710                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8711                 udelay(40);
8712         } else {
8713                 test_desc.cqid_sqid = (16 << 8) | 7;
8714
8715                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8716                 udelay(40);
8717         }
8718         test_desc.flags = 0x00000005;
8719
8720         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8721                 u32 val;
8722
8723                 val = *(((u32 *)&test_desc) + i);
8724                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8725                                        sram_dma_descs + (i * sizeof(u32)));
8726                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8727         }
8728         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8729
8730         if (to_device) {
8731                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8732         } else {
8733                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8734         }
8735
8736         ret = -ENODEV;
8737         for (i = 0; i < 40; i++) {
8738                 u32 val;
8739
8740                 if (to_device)
8741                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8742                 else
8743                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8744                 if ((val & 0xffff) == sram_dma_descs) {
8745                         ret = 0;
8746                         break;
8747                 }
8748
8749                 udelay(100);
8750         }
8751
8752         return ret;
8753 }
8754
8755 #define TEST_BUFFER_SIZE        0x400
8756
8757 static int __devinit tg3_test_dma(struct tg3 *tp)
8758 {
8759         dma_addr_t buf_dma;
8760         u32 *buf;
8761         int ret;
8762
8763         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8764         if (!buf) {
8765                 ret = -ENOMEM;
8766                 goto out_nofree;
8767         }
8768
8769         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8770                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8771
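             /* On non-x86 platforms the host bridge's cacheline size decides
              * which DMA write boundary is safe, so read it back and pick a
              * matching DMA_RWCTRL_WRITE_BNDRY_* setting.
              */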
8772 #ifndef CONFIG_X86
8773         {
8774                 u8 byte;
8775                 int cacheline_size;
8776                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8777
8778                 if (byte == 0)
8779                         cacheline_size = 1024;
8780                 else
8781                         cacheline_size = (int) byte * 4;
8782
8783                 switch (cacheline_size) {
8784                 case 16:
8785                 case 32:
8786                 case 64:
8787                 case 128:
8788                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8789                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8790                                 tp->dma_rwctrl |=
8791                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8792                                 break;
8793                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8794                                 tp->dma_rwctrl &=
8795                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8796                                 tp->dma_rwctrl |=
8797                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8798                                 break;
8799                         }
8800                         /* fallthrough */
8801                 case 256:
8802                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8803                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8804                                 tp->dma_rwctrl |=
8805                                         DMA_RWCTRL_WRITE_BNDRY_256;
8806                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8807                                 tp->dma_rwctrl |=
8808                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8809                 };
8810         }
8811 #endif
8812
8813         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8814                 /* DMA read watermark not used on PCIE */
8815                 tp->dma_rwctrl |= 0x00180000;
8816         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8817                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8818                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8819                         tp->dma_rwctrl |= 0x003f0000;
8820                 else
8821                         tp->dma_rwctrl |= 0x003f000f;
8822         } else {
8823                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8824                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8825                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8826
8827                         if (ccval == 0x6 || ccval == 0x7)
8828                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8829
8830                         /* Set bit 23 to re-enable the PCIX hw bug fix */
8831                         tp->dma_rwctrl |= 0x009f0000;
8832                 } else {
8833                         tp->dma_rwctrl |= 0x001b000f;
8834                 }
8835         }
8836
8837         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8838             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8839                 tp->dma_rwctrl &= 0xfffffff0;
8840
8841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8843                 /* Remove this if it causes problems for some boards. */
8844                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8845
8846                 /* On 5700/5701 chips, we need to set this bit.
8847                  * Otherwise the chip will issue cacheline transactions
8848                  * to streamable DMA memory without all of the byte
8849                  * enables turned on.  This is an error on several
8850                  * RISC PCI controllers, in particular sparc64.
8851                  *
8852                  * On 5703/5704 chips, this bit has been reassigned
8853                  * a different meaning.  In particular, it is used
8854                  * on those chips to enable a PCI-X workaround.
8855                  */
8856                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8857         }
8858
8859         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8860
8861 #if 0
8862         /* Unneeded, already done by tg3_get_invariants.  */
8863         tg3_switch_clocks(tp);
8864 #endif
8865
8866         ret = 0;
8867         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8868             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8869                 goto out;
8870
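             /* 5700/5701 only: DMA an incrementing pattern out to the chip
              * and back again.  If the read-back is corrupted while the
              * write boundary is disabled, enable the 16-byte boundary
              * workaround and retry; any other corruption is fatal.
              */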
8871         while (1) {
8872                 u32 *p = buf, i;
8873
8874                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8875                         p[i] = i;
8876
8877                 /* Send the buffer to the chip. */
8878                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8879                 if (ret) {
8880                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8881                         break;
8882                 }
8883
8884 #if 0
8885                 /* validate data reached card RAM correctly. */
8886                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8887                         u32 val;
8888                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8889                         if (le32_to_cpu(val) != p[i]) {
8890                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8891                                 /* ret = -ENODEV here? */
8892                         }
8893                         p[i] = 0;
8894                 }
8895 #endif
8896                 /* Now read it back. */
8897                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8898                 if (ret) {
8899                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8900
8901                         break;
8902                 }
8903
8904                 /* Verify it. */
8905                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8906                         if (p[i] == i)
8907                                 continue;
8908
8909                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8910                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8911                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8912                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8913                                 break;
8914                         } else {
8915                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8916                                 ret = -ENODEV;
8917                                 goto out;
8918                         }
8919                 }
8920
8921                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8922                         /* Success. */
8923                         ret = 0;
8924                         break;
8925                 }
8926         }
8927
8928 out:
8929         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8930 out_nofree:
8931         return ret;
8932 }
8933
8934 static void __devinit tg3_init_link_config(struct tg3 *tp)
8935 {
8936         tp->link_config.advertising =
8937                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8938                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8939                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8940                  ADVERTISED_Autoneg | ADVERTISED_MII);
8941         tp->link_config.speed = SPEED_INVALID;
8942         tp->link_config.duplex = DUPLEX_INVALID;
8943         tp->link_config.autoneg = AUTONEG_ENABLE;
8944         netif_carrier_off(tp->dev);
8945         tp->link_config.active_speed = SPEED_INVALID;
8946         tp->link_config.active_duplex = DUPLEX_INVALID;
8947         tp->link_config.phy_is_low_power = 0;
8948         tp->link_config.orig_speed = SPEED_INVALID;
8949         tp->link_config.orig_duplex = DUPLEX_INVALID;
8950         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8951 }
8952
8953 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8954 {
8955         tp->bufmgr_config.mbuf_read_dma_low_water =
8956                 DEFAULT_MB_RDMA_LOW_WATER;
8957         tp->bufmgr_config.mbuf_mac_rx_low_water =
8958                 DEFAULT_MB_MACRX_LOW_WATER;
8959         tp->bufmgr_config.mbuf_high_water =
8960                 DEFAULT_MB_HIGH_WATER;
8961
8962         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8963                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8964         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8965                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8966         tp->bufmgr_config.mbuf_high_water_jumbo =
8967                 DEFAULT_MB_HIGH_WATER_JUMBO;
8968
8969         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8970         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8971 }
8972
8973 static char * __devinit tg3_phy_string(struct tg3 *tp)
8974 {
8975         switch (tp->phy_id & PHY_ID_MASK) {
8976         case PHY_ID_BCM5400:    return "5400";
8977         case PHY_ID_BCM5401:    return "5401";
8978         case PHY_ID_BCM5411:    return "5411";
8979         case PHY_ID_BCM5701:    return "5701";
8980         case PHY_ID_BCM5703:    return "5703";
8981         case PHY_ID_BCM5704:    return "5704";
8982         case PHY_ID_BCM5705:    return "5705";
8983         case PHY_ID_BCM5750:    return "5750";
8984         case PHY_ID_BCM5752:    return "5752";
8985         case PHY_ID_BCM8002:    return "8002/serdes";
8986         case 0:                 return "serdes";
8987         default:                return "unknown";
8988         };
8989 }
8990
8991 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8992 {
8993         struct pci_dev *peer;
8994         unsigned int func, devnr = tp->pdev->devfn & ~7;
8995
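             /* The 5704 is a dual-port device whose two MACs appear as two
              * functions of the same PCI device, so the peer is whichever
              * other function shares our slot.
              */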
8996         for (func = 0; func < 8; func++) {
8997                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8998                 if (peer && peer != tp->pdev)
8999                         break;
9000                 pci_dev_put(peer);
9001         }
9002         if (!peer || peer == tp->pdev)
9003                 BUG();
9004
9005         /*
9006          * We don't need to keep the refcount elevated; there's no way
9007          * to remove one half of this device without removing the other
9008          */
9009         pci_dev_put(peer);
9010
9011         return peer;
9012 }
9013
9014 static int __devinit tg3_init_one(struct pci_dev *pdev,
9015                                   const struct pci_device_id *ent)
9016 {
9017         static int tg3_version_printed = 0;
9018         unsigned long tg3reg_base, tg3reg_len;
9019         struct net_device *dev;
9020         struct tg3 *tp;
9021         int i, err, pci_using_dac, pm_cap;
9022
9023         if (tg3_version_printed++ == 0)
9024                 printk(KERN_INFO "%s", version);
9025
9026         err = pci_enable_device(pdev);
9027         if (err) {
9028                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9029                        "aborting.\n");
9030                 return err;
9031         }
9032
9033         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9034                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9035                        "base address, aborting.\n");
9036                 err = -ENODEV;
9037                 goto err_out_disable_pdev;
9038         }
9039
9040         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9041         if (err) {
9042                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9043                        "aborting.\n");
9044                 goto err_out_disable_pdev;
9045         }
9046
9047         pci_set_master(pdev);
9048
9049         /* Find power-management capability. */
9050         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9051         if (pm_cap == 0) {
9052                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9053                        "aborting.\n");
9054                 err = -EIO;
9055                 goto err_out_free_res;
9056         }
9057
9058         /* Configure DMA attributes. */
9059         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9060         if (!err) {
9061                 pci_using_dac = 1;
9062                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9063                 if (err < 0) {
9064                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9065                                "for consistent allocations\n");
9066                         goto err_out_free_res;
9067                 }
9068         } else {
9069                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9070                 if (err) {
9071                         printk(KERN_ERR PFX "No usable DMA configuration, "
9072                                "aborting.\n");
9073                         goto err_out_free_res;
9074                 }
9075                 pci_using_dac = 0;
9076         }
9077
9078         tg3reg_base = pci_resource_start(pdev, 0);
9079         tg3reg_len = pci_resource_len(pdev, 0);
9080
9081         dev = alloc_etherdev(sizeof(*tp));
9082         if (!dev) {
9083                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9084                 err = -ENOMEM;
9085                 goto err_out_free_res;
9086         }
9087
9088         SET_MODULE_OWNER(dev);
9089         SET_NETDEV_DEV(dev, &pdev->dev);
9090
9091         if (pci_using_dac)
9092                 dev->features |= NETIF_F_HIGHDMA;
9093         dev->features |= NETIF_F_LLTX;
9094 #if TG3_VLAN_TAG_USED
9095         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9096         dev->vlan_rx_register = tg3_vlan_rx_register;
9097         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9098 #endif
9099
9100         tp = netdev_priv(dev);
9101         tp->pdev = pdev;
9102         tp->dev = dev;
9103         tp->pm_cap = pm_cap;
9104         tp->mac_mode = TG3_DEF_MAC_MODE;
9105         tp->rx_mode = TG3_DEF_RX_MODE;
9106         tp->tx_mode = TG3_DEF_TX_MODE;
9107         tp->mi_mode = MAC_MI_MODE_BASE;
9108         if (tg3_debug > 0)
9109                 tp->msg_enable = tg3_debug;
9110         else
9111                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
9112
9113         /* The word/byte swap controls here control register access byte
9114          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
9115          * setting below.
9116          */
9117         tp->misc_host_ctrl =
9118                 MISC_HOST_CTRL_MASK_PCI_INT |
9119                 MISC_HOST_CTRL_WORD_SWAP |
9120                 MISC_HOST_CTRL_INDIR_ACCESS |
9121                 MISC_HOST_CTRL_PCISTATE_RW;
9122
9123         /* The NONFRM (non-frame) byte/word swap controls take effect
9124          * on descriptor entries, anything which isn't packet data.
9125          *
9126          * The StrongARM chips on the board (one for tx, one for rx)
9127          * are running in big-endian mode.
9128          */
9129         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
9130                         GRC_MODE_WSWAP_NONFRM_DATA);
9131 #ifdef __BIG_ENDIAN
9132         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
9133 #endif
9134         spin_lock_init(&tp->lock);
9135         spin_lock_init(&tp->tx_lock);
9136         spin_lock_init(&tp->indirect_lock);
9137         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
9138
9139         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
9140         if (!tp->regs) {
9141                 printk(KERN_ERR PFX "Cannot map device registers, "
9142                        "aborting.\n");
9143                 err = -ENOMEM;
9144                 goto err_out_free_dev;
9145         }
9146
9147         tg3_init_link_config(tp);
9148
9149         tg3_init_bufmgr_config(tp);
9150
9151         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
9152         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
9153         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
9154
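        /* Fill in the net_device entry points.  tg3_poll is the NAPI
         * poll routine; dev->weight bounds how much work one poll call
         * may do before yielding.
         */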
9155         dev->open = tg3_open;
9156         dev->stop = tg3_close;
9157         dev->get_stats = tg3_get_stats;
9158         dev->set_multicast_list = tg3_set_rx_mode;
9159         dev->set_mac_address = tg3_set_mac_addr;
9160         dev->do_ioctl = tg3_ioctl;
9161         dev->tx_timeout = tg3_tx_timeout;
9162         dev->poll = tg3_poll;
9163         dev->ethtool_ops = &tg3_ethtool_ops;
9164         dev->weight = 64;
9165         dev->watchdog_timeo = TG3_TX_TIMEOUT;
9166         dev->change_mtu = tg3_change_mtu;
9167         dev->irq = pdev->irq;
9168 #ifdef CONFIG_NET_POLL_CONTROLLER
9169         dev->poll_controller = tg3_poll_controller;
9170 #endif
9171
9172         err = tg3_get_invariants(tp);
9173         if (err) {
9174                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
9175                        "aborting.\n");
9176                 goto err_out_iounmap;
9177         }
9178
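        /* 5705-class chips use lower buffer manager watermarks than the
         * defaults chosen in tg3_init_bufmgr_config(), as these parts
         * have a smaller on-chip buffer pool.
         */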
9179         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9180                 tp->bufmgr_config.mbuf_read_dma_low_water =
9181                         DEFAULT_MB_RDMA_LOW_WATER_5705;
9182                 tp->bufmgr_config.mbuf_mac_rx_low_water =
9183                         DEFAULT_MB_MACRX_LOW_WATER_5705;
9184                 tp->bufmgr_config.mbuf_high_water =
9185                         DEFAULT_MB_HIGH_WATER_5705;
9186         }
9187
9188 #if TG3_TSO_SUPPORT != 0
9189         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
9190                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9191         }
9192         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9193             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9194             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
9195             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
9196                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9197         } else {
9198                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9199         }
9200
9201         /* TSO is off by default; the user can enable it with ethtool.  */
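        /* For example (assuming the interface registered as eth0):
         *
         *      # ethtool -K eth0 tso on
         */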
9202 #if 0
9203         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
9204                 dev->features |= NETIF_F_TSO;
9205 #endif
9206
9207 #endif
9208
9209         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
9210             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
9211             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
9212                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
9213                 tp->rx_pending = 63;
9214         }
9215
9216         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9217                 tp->pdev_peer = tg3_find_5704_peer(tp);
9218
9219         err = tg3_get_device_address(tp);
9220         if (err) {
9221                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
9222                        "aborting.\n");
9223                 goto err_out_iounmap;
9224         }
9225
9226         /*
9227          * Reset the chip in case an UNDI or EFI driver did not shut it
9228          * down cleanly.  The DMA self test below will enable WDMAC, and
9229          * we would then see (spurious) pending DMA on the PCI bus.
9230          */
9231         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
9232             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9233                 pci_save_state(tp->pdev);
9234                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9235                 tg3_halt(tp, 1);
9236         }
9237
9238         err = tg3_test_dma(tp);
9239         if (err) {
9240                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
9241                 goto err_out_iounmap;
9242         }
9243
9244         /* Tigon3 can only checksum IPv4 packets, and some chips have
9245          * buggy checksum hardware.
9246          */
9247         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
9248                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
9249                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9250         } else
9251                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9252
9253         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
9254                 dev->features &= ~NETIF_F_HIGHDMA;
9255
9256         /* Flow control autonegotiation is the default behavior. */
9257         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9258
9259         err = register_netdev(dev);
9260         if (err) {
9261                 printk(KERN_ERR PFX "Cannot register net device, "
9262                        "aborting.\n");
9263                 goto err_out_iounmap;
9264         }
9265
9266         pci_set_drvdata(pdev, dev);
9267
9268         /* Now that we have fully set up the chip, save away a snapshot
9269          * of the PCI config space.  We need to restore this after
9270          * GRC_MISC_CFG core clock resets and some resume events.
9271          */
9272         pci_save_state(tp->pdev);
9273
9274         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
9275                dev->name,
9276                tp->board_part_number,
9277                tp->pci_chip_rev_id,
9278                tg3_phy_string(tp),
9279                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
9280                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
9281                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
9282                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
9283                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
9284                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
9285
9286         for (i = 0; i < 6; i++)
9287                 printk("%2.2x%c", dev->dev_addr[i],
9288                        i == 5 ? '\n' : ':');
9289
9290         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
9291                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
9292                "TSOcap[%d] \n",
9293                dev->name,
9294                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
9295                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
9296                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
9297                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
9298                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9299                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9300                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9301
9302         return 0;
9303
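        /* Error unwind: each label below releases only what was acquired
         * after the previous goto target, in reverse order of setup.
         */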
9304 err_out_iounmap:
9305         iounmap(tp->regs);
9306
9307 err_out_free_dev:
9308         free_netdev(dev);
9309
9310 err_out_free_res:
9311         pci_release_regions(pdev);
9312
9313 err_out_disable_pdev:
9314         pci_disable_device(pdev);
9315         pci_set_drvdata(pdev, NULL);
9316         return err;
9317 }
9318
9319 static void __devexit tg3_remove_one(struct pci_dev *pdev)
9320 {
9321         struct net_device *dev = pci_get_drvdata(pdev);
9322
9323         if (dev) {
9324                 struct tg3 *tp = netdev_priv(dev);
9325
9326                 unregister_netdev(dev);
9327                 iounmap(tp->regs);
9328                 free_netdev(dev);
9329                 pci_release_regions(pdev);
9330                 pci_disable_device(pdev);
9331                 pci_set_drvdata(pdev, NULL);
9332         }
9333 }
9334
9335 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9336 {
9337         struct net_device *dev = pci_get_drvdata(pdev);
9338         struct tg3 *tp = netdev_priv(dev);
9339         int err;
9340
9341         if (!netif_running(dev))
9342                 return 0;
9343
9344         tg3_netif_stop(tp);
9345
9346         del_timer_sync(&tp->timer);
9347
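        /* Take tp->lock and then tp->tx_lock with interrupts disabled so
         * neither the interrupt handler nor the transmit path can touch
         * the hardware while it is quiesced.
         */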
9348         spin_lock_irq(&tp->lock);
9349         spin_lock(&tp->tx_lock);
9350         tg3_disable_ints(tp);
9351         spin_unlock(&tp->tx_lock);
9352         spin_unlock_irq(&tp->lock);
9353
9354         netif_device_detach(dev);
9355
9356         spin_lock_irq(&tp->lock);
9357         spin_lock(&tp->tx_lock);
9358         tg3_halt(tp, 1);
9359         spin_unlock(&tp->tx_lock);
9360         spin_unlock_irq(&tp->lock);
9361
9362         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9363         if (err) {
9364                 spin_lock_irq(&tp->lock);
9365                 spin_lock(&tp->tx_lock);
9366
9367                 tg3_init_hw(tp);
9368
9369                 tp->timer.expires = jiffies + tp->timer_offset;
9370                 add_timer(&tp->timer);
9371
9372                 netif_device_attach(dev);
9373                 tg3_netif_start(tp);
9374
9375                 spin_unlock(&tp->tx_lock);
9376                 spin_unlock_irq(&tp->lock);
9377         }
9378
9379         return err;
9380 }
9381
9382 static int tg3_resume(struct pci_dev *pdev)
9383 {
9384         struct net_device *dev = pci_get_drvdata(pdev);
9385         struct tg3 *tp = netdev_priv(dev);
9386         int err;
9387
9388         if (!netif_running(dev))
9389                 return 0;
9390
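        /* Restore the PCI config space snapshot saved at probe time
         * before powering the chip back up and re-initializing it.
         */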
9391         pci_restore_state(tp->pdev);
9392
9393         err = tg3_set_power_state(tp, 0);
9394         if (err)
9395                 return err;
9396
9397         netif_device_attach(dev);
9398
9399         spin_lock_irq(&tp->lock);
9400         spin_lock(&tp->tx_lock);
9401
9402         tg3_init_hw(tp);
9403
9404         tp->timer.expires = jiffies + tp->timer_offset;
9405         add_timer(&tp->timer);
9406
9407         tg3_enable_ints(tp);
9408
9409         tg3_netif_start(tp);
9410
9411         spin_unlock(&tp->tx_lock);
9412         spin_unlock_irq(&tp->lock);
9413
9414         return 0;
9415 }
9416
9417 static struct pci_driver tg3_driver = {
9418         .name           = DRV_MODULE_NAME,
9419         .id_table       = tg3_pci_tbl,
9420         .probe          = tg3_init_one,
9421         .remove         = __devexit_p(tg3_remove_one),
9422         .suspend        = tg3_suspend,
9423         .resume         = tg3_resume
9424 };
9425
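/* Standard module entry points: register the PCI driver on load and
 * unregister it on unload; individual devices are then bound through
 * tg3_init_one() via the id table.
 */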
9426 static int __init tg3_init(void)
9427 {
9428         return pci_module_init(&tg3_driver);
9429 }
9430
9431 static void __exit tg3_cleanup(void)
9432 {
9433         pci_unregister_driver(&tg3_driver);
9434 }
9435
9436 module_init(tg3_init);
9437 module_exit(tg3_cleanup);