2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
18 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
40 #include <net/checksum.h>
42 #include <asm/system.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
/* Driver-wide compile-time constants: VLAN/TSO feature gates, module
 * identity strings, default mode registers, and ring-geometry macros.
 * NOTE(review): this extraction is missing lines (e.g. the #else/#endif
 * of the feature gates and several macro continuation lines) -- verify
 * every macro against the complete file before editing.
 */
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
56 #define TG3_VLAN_TAG_USED 0
60 #define TG3_TSO_SUPPORT 1
62 #define TG3_TSO_SUPPORT 0
67 #define DRV_MODULE_NAME "tg3"
68 #define PFX DRV_MODULE_NAME ": "
69 #define DRV_MODULE_VERSION "3.33"
70 #define DRV_MODULE_RELDATE "July 5, 2005"
72 #define TG3_DEF_MAC_MODE 0
73 #define TG3_DEF_RX_MODE 0
74 #define TG3_DEF_TX_MODE 0
75 #define TG3_DEF_MSG_ENABLE \
85 /* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
88 #define TG3_TX_TIMEOUT (5 * HZ)
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU 60
92 #define TG3_MAX_MTU(tp) \
93 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
99 #define TG3_RX_RING_SIZE 512
100 #define TG3_DEF_RX_RING_PENDING 200
101 #define TG3_RX_JUMBO_RING_SIZE 256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
104 /* Do not place this n-ring entries value into the tp struct itself,
105 * we really want to expose these constants to GCC so that modulo et
106 * al. operations are done with shifts and masks instead of with
107 * hw multiply/modulo instructions. Another solution would be to
108 * replace things like '% foo' with '& (foo - 1)'.
110 #define TG3_RX_RCB_RING_SIZE(tp) \
111 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
113 #define TG3_TX_RING_SIZE 512
114 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
116 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
124 #define TX_RING_GAP(TP) \
125 (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP) \
127 (((TP)->tx_cons <= (TP)->tx_prod) ? \
128 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
129 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
132 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
141 #define TG3_NUM_TEST 6
/* Module identity banner and module parameters.
 * tg3_debug is a bitmap of netif message-enable flags; -1 selects the
 * TG3_DEF_MSG_ENABLE default at probe time.
 */
143 static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
151 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
155 static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS; order must match
 * struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 * NOTE(review): the opening "static struct {" line and several entries
 * are missing from this extraction -- verify ordering in the full file.
 */
250 const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
254 { "rx_ucast_packets" },
255 { "rx_mcast_packets" },
256 { "rx_bcast_packets" },
258 { "rx_align_errors" },
259 { "rx_xon_pause_rcvd" },
260 { "rx_xoff_pause_rcvd" },
261 { "rx_mac_ctrl_rcvd" },
262 { "rx_xoff_entered" },
263 { "rx_frame_too_long_errors" },
265 { "rx_undersize_packets" },
266 { "rx_in_length_errors" },
267 { "rx_out_length_errors" },
268 { "rx_64_or_less_octet_packets" },
269 { "rx_65_to_127_octet_packets" },
270 { "rx_128_to_255_octet_packets" },
271 { "rx_256_to_511_octet_packets" },
272 { "rx_512_to_1023_octet_packets" },
273 { "rx_1024_to_1522_octet_packets" },
274 { "rx_1523_to_2047_octet_packets" },
275 { "rx_2048_to_4095_octet_packets" },
276 { "rx_4096_to_8191_octet_packets" },
277 { "rx_8192_to_9022_octet_packets" },
284 { "tx_flow_control" },
286 { "tx_single_collisions" },
287 { "tx_mult_collisions" },
289 { "tx_excessive_collisions" },
290 { "tx_late_collisions" },
291 { "tx_collide_2times" },
292 { "tx_collide_3times" },
293 { "tx_collide_4times" },
294 { "tx_collide_5times" },
295 { "tx_collide_6times" },
296 { "tx_collide_7times" },
297 { "tx_collide_8times" },
298 { "tx_collide_9times" },
299 { "tx_collide_10times" },
300 { "tx_collide_11times" },
301 { "tx_collide_12times" },
302 { "tx_collide_13times" },
303 { "tx_collide_14times" },
304 { "tx_collide_15times" },
305 { "tx_ucast_packets" },
306 { "tx_mcast_packets" },
307 { "tx_bcast_packets" },
308 { "tx_carrier_sense_errors" },
312 { "dma_writeq_full" },
313 { "dma_write_prioq_full" },
317 { "rx_threshold_hit" },
319 { "dma_readq_full" },
320 { "dma_read_prioq_full" },
321 { "tx_comp_queue_full" },
323 { "ring_set_send_prod_index" },
324 { "ring_status_update" },
326 { "nic_avoided_irqs" },
327 { "nic_tx_threshold_hit" }
/* Names for the ETHTOOL_TEST self-test results (TG3_NUM_TEST == 6);
 * order must match the test execution order in the self-test code.
 * NOTE(review): opening "static struct {" line missing from extraction.
 */
331 const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333 { "nvram test (online) " },
334 { "link test (online) " },
335 { "register test (offline)" },
336 { "memory test (offline)" },
337 { "loopback test (offline)" },
338 { "interrupt test (offline)" },
/* Write a 32-bit device register. When the PCI-X target hardware bug
 * workaround flag is set, the write goes through the indirect PCI
 * config-space window (serialized by indirect_lock); otherwise it is a
 * direct MMIO write, with a flushing read-back on chips that have the
 * 5701 register-write bug.
 * NOTE(review): the "} else {" between the two paths is missing from
 * this extraction -- verify against the complete file.
 */
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
343 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
344 spin_lock_bh(&tp->indirect_lock);
345 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347 spin_unlock_bh(&tp->indirect_lock);
349 writel(val, tp->regs + off);
350 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
351 readl(tp->regs + off);
/* Like tg3_write_indirect_reg32, but on the direct path always flushes
 * the posted PCI write with a read-back of the same register. Backs the
 * tw32_f() macro.
 * NOTE(review): "} else {" and the writel() on the direct path are
 * missing from this extraction -- verify against the complete file.
 */
355 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
357 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
358 spin_lock_bh(&tp->indirect_lock);
359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361 spin_unlock_bh(&tp->indirect_lock);
363 void __iomem *dest = tp->regs + off;
365 readl(dest); /* always flush PCI write */
/* Write an RX mailbox register; on chips flagged with mailbox write
 * reordering the value is re-read/re-written (lines missing here) to
 * defeat the reorder. NOTE(review): writel()/readl() body lines are
 * absent from this extraction -- verify against the complete file.
 */
369 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
371 void __iomem *mbox = tp->regs + off;
373 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Write a TX mailbox register, applying both the TXD mailbox hardware
 * bug workaround and the mailbox write-reorder workaround when flagged.
 * NOTE(review): the writel()/readl() body lines are absent from this
 * extraction -- verify against the complete file.
 */
377 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
379 void __iomem *mbox = tp->regs + off;
381 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
383 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Register access shorthands. All assume a local "tp" in scope:
 * tw32/tr32 etc. write/read the MMIO window at tp->regs; tw32 routes
 * through the indirect-access helper, tw32_f additionally flushes the
 * posted write.
 */
387 #define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
388 #define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
389 #define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
391 #define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
392 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
393 #define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
394 #define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
395 #define tr32(reg) readl(tp->regs + (reg))
396 #define tr16(reg) readw(tp->regs + (reg))
397 #define tr8(reg) readb(tp->regs + (reg))
/* Write one word of NIC on-board SRAM through the PCI config-space
 * memory window, under indirect_lock; the window base is restored to 0
 * afterwards so other config-space users see a known state.
 */
399 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
401 spin_lock_bh(&tp->indirect_lock);
402 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
405 /* Always leave this as zero. */
406 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407 spin_unlock_bh(&tp->indirect_lock);
410 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
412 spin_lock_bh(&tp->indirect_lock);
413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
416 /* Always leave this as zero. */
417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418 spin_unlock_bh(&tp->indirect_lock);
/* Mask the chip's PCI interrupt via MISC_HOST_CTRL, then write 1 to the
 * interrupt mailbox to disable further interrupt generation; the tr32
 * read-back flushes the posted mailbox write.
 */
421 static void tg3_disable_ints(struct tg3 *tp)
423 tw32(TG3PCI_MISC_HOST_CTRL,
424 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* If the status block says an update is pending, poke GRC local control
 * to force an interrupt so the pending work gets serviced.
 */
429 static inline void tg3_cond_int(struct tg3 *tp)
431 if (tp->hw_status->status & SD_STATUS_UPDATED)
432 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Unmask the PCI interrupt and write the last processed tag to the
 * interrupt mailbox (tag in bits 31:24) to re-enable interrupts; the
 * tr32 read-back flushes the posted write. The missing trailing lines
 * presumably call tg3_cond_int() -- verify in the full file.
 */
435 static void tg3_enable_ints(struct tg3 *tp)
440 tw32(TG3PCI_MISC_HOST_CTRL,
441 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT))
442 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443 (tp->last_tag << 24));
444 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
/* Return nonzero when the status block indicates pending work: a link
 * change event (unless link state is tracked by register polling), or
 * TX completions / RX packets whose status-block indices differ from
 * our software consumer/producer positions.
 * NOTE(review): lines setting/returning work_exists are missing from
 * this extraction.
 */
448 static inline unsigned int tg3_has_work(struct tg3 *tp)
450 struct tg3_hw_status *sblk = tp->hw_status;
451 unsigned int work_exists = 0;
453 /* check for phy events */
454 if (!(tp->tg3_flags &
455 (TG3_FLAG_USE_LINKCHG_REG |
456 TG3_FLAG_POLL_SERDES))) {
457 if (sblk->status & SD_STATUS_LINK_CHG)
460 /* check for RX/TX work to do */
461 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
462 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
469 * similar to tg3_enable_ints, but it accurately determines whether there
470 * is new work pending and can return without flushing the PIO write
471 * which reenables interrupts
/* Re-enable interrupts after NAPI poll without flushing the mailbox
 * write. With tagged status the written last_tag already identifies the
 * completed work; otherwise, if work remains, force a coalescing-now
 * cycle so the ISR runs again.
 */
473 static void tg3_restart_ints(struct tg3 *tp)
475 tw32(TG3PCI_MISC_HOST_CTRL,
476 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
481 /* When doing tagged status, this work check is unnecessary.
482 * The last_tag we write above tells the chip which piece of
483 * work we've completed.
485 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
487 tw32(HOSTCC_MODE, tp->coalesce_mode |
488 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Quiesce the netdev for a reconfiguration: refresh trans_start so the
 * watchdog doesn't fire while stopped, then disable NAPI polling and
 * the TX queue.
 */
491 static inline void tg3_netif_stop(struct tg3 *tp)
493 tp->dev->trans_start = jiffies; /* prevent tx timeout */
494 netif_poll_disable(tp->dev);
495 netif_tx_disable(tp->dev);
/* Counterpart of tg3_netif_stop: wake the TX queue, re-enable NAPI
 * polling, and mark the status block updated so pending events get
 * picked up (presumably followed by tg3_cond_int -- trailing lines
 * missing from this extraction).
 */
498 static inline void tg3_netif_start(struct tg3 *tp)
500 netif_wake_queue(tp->dev);
501 /* NOTE: unconditional netif_wake_queue is only appropriate
502 * so long as all callers are assured to have free tx slots
503 * (such as after tg3_init_hw)
505 netif_poll_enable(tp->dev);
506 tp->hw_status->status |= SD_STATUS_UPDATED;
/* Step the core clock back to its normal source/rate. 5780 is skipped
 * (early return, line missing here). On 5705+ parts running the 625MHz
 * core or parts on the 44MHz ALT clock, the switch is done in stages
 * (intermediate writes with udelay lines missing from this extraction)
 * before the final CLOCK_CTRL write.
 */
510 static void tg3_switch_clocks(struct tg3 *tp)
512 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
518 orig_clock_ctrl = clock_ctrl;
519 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
520 CLOCK_CTRL_CLKRUN_OENABLE |
522 tp->pci_clock_ctrl = clock_ctrl;
524 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
525 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
526 tw32_f(TG3PCI_CLOCK_CTRL,
527 clock_ctrl | CLOCK_CTRL_625_CORE);
530 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
531 tw32_f(TG3PCI_CLOCK_CTRL,
533 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
535 tw32_f(TG3PCI_CLOCK_CTRL,
536 clock_ctrl | (CLOCK_CTRL_ALTCLK));
539 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
/* Read PHY register 'reg' over the MII management interface into *val.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards. The MI_COM frame encodes PHY address, register,
 * and READ command; completion is polled up to PHY_BUSY_LOOPS times
 * (delay/loop-decrement lines missing from this extraction).
 * Returns 0 on success, presumably -EBUSY on timeout -- verify in the
 * complete file.
 */
543 #define PHY_BUSY_LOOPS 5000
545 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
551 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
553 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
559 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
560 MI_COM_PHY_ADDR_MASK);
561 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
562 MI_COM_REG_ADDR_MASK);
563 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
565 tw32_f(MAC_MI_COM, frame_val);
567 loops = PHY_BUSY_LOOPS;
570 frame_val = tr32(MAC_MI_COM);
572 if ((frame_val & MI_COM_BUSY) == 0) {
574 frame_val = tr32(MAC_MI_COM);
582 *val = frame_val & MI_COM_DATA_MASK;
586 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
587 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write 'val' to PHY register 'reg' over the MII management interface.
 * Mirror of tg3_readphy: auto-poll disabled around the transaction,
 * MI_COM frame carries address/register/data/WRITE command, completion
 * polled up to PHY_BUSY_LOOPS (delay/decrement lines missing from this
 * extraction). Returns 0 on success.
 */
594 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
600 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
602 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
606 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
607 MI_COM_PHY_ADDR_MASK);
608 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
609 MI_COM_REG_ADDR_MASK);
610 frame_val |= (val & MI_COM_DATA_MASK);
611 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
613 tw32_f(MAC_MI_COM, frame_val);
615 loops = PHY_BUSY_LOOPS;
618 frame_val = tr32(MAC_MI_COM);
619 if ((frame_val & MI_COM_BUSY) == 0) {
621 frame_val = tr32(MAC_MI_COM);
631 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
632 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable the PHY "ethernet@wirespeed" feature (auto-downshift on bad
 * cabling) via shadow register 0x7007 in AUX_CTRL, setting bits 15 and
 * 4 with read-modify-write. Skipped on chips flagged NO_ETH_WIRE_SPEED.
 */
639 static void tg3_phy_set_wirespeed(struct tg3 *tp)
643 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
646 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
647 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
648 tg3_writephy(tp, MII_TG3_AUX_CTRL,
649 (val | (1 << 15) | (1 << 4)));
/* Issue a PHY soft reset by writing BMCR_RESET, then poll MII_BMCR
 * until the self-clearing reset bit drops (timeout loop lines missing
 * from this extraction). Returns 0 on success, nonzero on MDIO error
 * or timeout.
 */
652 static int tg3_bmcr_reset(struct tg3 *tp)
657 /* OK, reset it, and poll the BMCR_RESET bit until it
658 * clears or we time out.
660 phy_control = BMCR_RESET;
661 err = tg3_writephy(tp, MII_BMCR, phy_control);
667 err = tg3_readphy(tp, MII_BMCR, &phy_control);
671 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until the macro-busy bit (0x1000) clears.
 * Returns 0 when done; timeout/return paths are missing from this
 * extraction -- verify against the complete file.
 */
683 static int tg3_wait_macro_done(struct tg3 *tp)
690 if (!tg3_readphy(tp, 0x16, &tmp32)) {
691 if ((tmp32 & 0x1000) == 0)
/* Write a fixed DSP test pattern to each of the 4 PHY channels, then
 * read it back and compare. On mismatch, writes the 0x000b/0x4001/
 * 0x4005 recovery sequence and (per the missing lines, presumably)
 * sets *resetp to request another PHY reset. Used by the 5703/4/5
 * reset workaround. Returns 0 when all channels verify.
 */
701 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
703 static const u32 test_pat[4][6] = {
704 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
705 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
706 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
707 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
711 for (chan = 0; chan < 4; chan++) {
714 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
715 (chan * 0x2000) | 0x0200);
716 tg3_writephy(tp, 0x16, 0x0002);
718 for (i = 0; i < 6; i++)
719 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
722 tg3_writephy(tp, 0x16, 0x0202);
723 if (tg3_wait_macro_done(tp)) {
728 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
729 (chan * 0x2000) | 0x0200);
730 tg3_writephy(tp, 0x16, 0x0082);
731 if (tg3_wait_macro_done(tp)) {
736 tg3_writephy(tp, 0x16, 0x0802);
737 if (tg3_wait_macro_done(tp)) {
742 for (i = 0; i < 6; i += 2) {
745 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
746 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
747 tg3_wait_macro_done(tp)) {
753 if (low != test_pat[chan][i] ||
754 high != test_pat[chan][i+1]) {
755 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
756 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
757 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Clear the DSP pattern on all 4 PHY channels (write zeros through the
 * RW port, then latch with 0x16 <- 0x0202) and wait for each macro to
 * finish. Returns 0 on success; the error return on macro timeout is
 * in a line missing from this extraction.
 */
767 static int tg3_phy_reset_chanpat(struct tg3 *tp)
771 for (chan = 0; chan < 4; chan++) {
774 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
775 (chan * 0x2000) | 0x0200);
776 tg3_writephy(tp, 0x16, 0x0002);
777 for (i = 0; i < 6; i++)
778 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
779 tg3_writephy(tp, 0x16, 0x0202);
780 if (tg3_wait_macro_done(tp))
/* Workaround reset sequence for 5703/5704/5705 PHYs: BMCR reset,
 * disable transmitter/interrupt, force 1000/full master mode, enable
 * SM_DSP clock, write/verify the DSP test pattern (retrying with a
 * fresh reset as needed), clear the channel pattern, then restore the
 * original 1000BASE-T control and EXT_CTRL settings.
 * NOTE(review): "®32" below is mojibake for "&reg32" (HTML entity
 * collapse in this extraction) -- restore before compiling. Retry-loop
 * and error-return lines are also missing.
 */
787 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
789 u32 reg32, phy9_orig;
790 int retries, do_phy_reset, err;
796 err = tg3_bmcr_reset(tp);
802 /* Disable transmitter and interrupt. */
803 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
807 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
809 /* Set full-duplex, 1000 mbps. */
810 tg3_writephy(tp, MII_BMCR,
811 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
813 /* Set to master mode. */
814 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
817 tg3_writephy(tp, MII_TG3_CTRL,
818 (MII_TG3_CTRL_AS_MASTER |
819 MII_TG3_CTRL_ENABLE_AS_MASTER));
821 /* Enable SM_DSP_CLOCK and 6dB. */
822 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
824 /* Block the PHY control access. */
825 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
826 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
828 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
833 err = tg3_phy_reset_chanpat(tp);
837 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
838 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
840 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
841 tg3_writephy(tp, 0x16, 0x0000);
843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
845 /* Set Extended packet length bit for jumbo frames */
846 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
849 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
852 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
854 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
856 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
863 /* This will reset the tigon3 PHY if there is no valid
864 * link unless the FORCE argument is non-zero.
/* Top-level PHY reset: read BMSR twice (first read clears latched
 * bits), run the chip-specific 5703/4/5 workaround where required or a
 * plain BMCR reset otherwise, then apply per-erratum DSP fixups
 * (ADC bug, 5704 A0 bug, BER bug), set the extended-packet-length bit
 * for jumbo-capable chips, raise FIFO elasticity for jumbo TX, and
 * finally enable wirespeed. Returns 0 on success; early-error returns
 * are in lines missing from this extraction.
 */
866 static int tg3_phy_reset(struct tg3 *tp)
871 err = tg3_readphy(tp, MII_BMSR, &phy_status);
872 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
879 err = tg3_phy_reset_5703_4_5(tp);
885 err = tg3_bmcr_reset(tp);
890 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
891 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
892 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
893 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
894 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
895 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
896 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
898 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
899 tg3_writephy(tp, 0x1c, 0x8d68);
900 tg3_writephy(tp, 0x1c, 0x8d68);
902 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
903 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
904 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
905 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
906 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
907 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
908 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
909 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
910 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
912 /* Set Extended packet length bit (bit 14) on all chips that */
913 /* support jumbo frames */
914 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
915 /* Cannot do read-modify-write on 5401 */
916 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
917 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
920 /* Set bit 14 with read-modify-write to preserve other bits */
921 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
922 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
923 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
926 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
927 * jumbo frames transmission.
929 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
932 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
933 tg3_writephy(tp, MII_TG3_EXT_CTRL,
934 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC)
937 tg3_phy_set_wirespeed(tp);
941 static void tg3_frob_aux_power(struct tg3 *tp)
943 struct tg3 *tp_peer = tp;
945 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
949 tp_peer = pci_get_drvdata(tp->pdev_peer);
955 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
956 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
959 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
960 (GRC_LCLCTRL_GPIO_OE0 |
961 GRC_LCLCTRL_GPIO_OE1 |
962 GRC_LCLCTRL_GPIO_OE2 |
963 GRC_LCLCTRL_GPIO_OUTPUT0 |
964 GRC_LCLCTRL_GPIO_OUTPUT1));
971 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
974 /* On 5753 and variants, GPIO2 cannot be used. */
975 no_gpio2 = tp->nic_sram_data_cfg &
976 NIC_SRAM_DATA_CFG_NO_GPIO2;
978 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
979 GRC_LCLCTRL_GPIO_OE1 |
980 GRC_LCLCTRL_GPIO_OE2 |
981 GRC_LCLCTRL_GPIO_OUTPUT1 |
982 GRC_LCLCTRL_GPIO_OUTPUT2;
984 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
985 GRC_LCLCTRL_GPIO_OUTPUT2);
987 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
991 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
993 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
998 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
999 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1005 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1006 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1007 if (tp_peer != tp &&
1008 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1011 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1012 (GRC_LCLCTRL_GPIO_OE1 |
1013 GRC_LCLCTRL_GPIO_OUTPUT1));
1016 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1017 (GRC_LCLCTRL_GPIO_OE1));
1020 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1021 (GRC_LCLCTRL_GPIO_OE1 |
1022 GRC_LCLCTRL_GPIO_OUTPUT1));
/* Forward declarations and reset-kind codes used by the power
 * management path below (tg3_set_power_state) before the functions are
 * defined later in the file.
 */
1028 static int tg3_setup_phy(struct tg3 *, int);
1030 #define RESET_KIND_SHUTDOWN 0
1031 #define RESET_KIND_INIT 1
1032 #define RESET_KIND_SUSPEND 2
1034 static void tg3_write_sig_post_reset(struct tg3 *, int);
1035 static int tg3_halt_cpu(struct tg3 *, u32);
/* Transition the chip into a PCI power state. For D0: clear PME
 * status, select D0, switch out of Vaux. For low-power states: save
 * current link config, force 10/half autoneg on copper PHYs, program
 * WOL (magic-packet) MAC mode if enabled, gate RX/TX clocks per chip
 * family, frob aux power GPIOs, apply the 5750 AX/BX PLL workaround,
 * and finally write the PM control register and post-reset signature.
 * Returns 0 on success (error paths in lines missing from this
 * extraction -- e.g. the invalid-state branch returns an error).
 */
1037 static int tg3_set_power_state(struct tg3 *tp, int state)
1040 u16 power_control, power_caps;
1041 int pm = tp->pm_cap;
1043 /* Make sure register accesses (indirect or otherwise)
1044 * will function correctly.
1046 pci_write_config_dword(tp->pdev,
1047 TG3PCI_MISC_HOST_CTRL,
1048 tp->misc_host_ctrl);
1050 pci_read_config_word(tp->pdev,
1053 power_control |= PCI_PM_CTRL_PME_STATUS;
1054 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1058 pci_write_config_word(tp->pdev,
1061 udelay(100); /* Delay after power state change */
1063 /* Switch out of Vaux if it is not a LOM */
1064 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1065 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1084 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1086 tp->dev->name, state);
1090 power_control |= PCI_PM_CTRL_PME_ENABLE;
1092 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1093 tw32(TG3PCI_MISC_HOST_CTRL,
1094 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1096 if (tp->link_config.phy_is_low_power == 0) {
1097 tp->link_config.phy_is_low_power = 1;
1098 tp->link_config.orig_speed = tp->link_config.speed;
1099 tp->link_config.orig_duplex = tp->link_config.duplex;
1100 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1103 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1104 tp->link_config.speed = SPEED_10;
1105 tp->link_config.duplex = DUPLEX_HALF;
1106 tp->link_config.autoneg = AUTONEG_ENABLE;
1107 tg3_setup_phy(tp, 0);
1110 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1112 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1115 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1116 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1119 mac_mode = MAC_MODE_PORT_MODE_MII;
1121 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1122 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1123 mac_mode |= MAC_MODE_LINK_POLARITY;
1125 mac_mode = MAC_MODE_PORT_MODE_TBI;
1128 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1129 tw32(MAC_LED_CTRL, tp->led_ctrl);
1131 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1132 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1133 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1135 tw32_f(MAC_MODE, mac_mode);
1138 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1142 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1143 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1147 base_val = tp->pci_clock_ctrl;
1148 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1149 CLOCK_CTRL_TXCLK_DISABLE);
1151 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1153 CLOCK_CTRL_PWRDOWN_PLL133);
1155 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1157 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1158 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1159 u32 newbits1, newbits2;
1161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1163 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1164 CLOCK_CTRL_TXCLK_DISABLE |
1166 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1167 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1168 newbits1 = CLOCK_CTRL_625_CORE;
1169 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1171 newbits1 = CLOCK_CTRL_ALTCLK;
1172 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1175 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1178 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1181 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1186 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1187 CLOCK_CTRL_TXCLK_DISABLE |
1188 CLOCK_CTRL_44MHZ_CORE);
1190 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1193 tw32_f(TG3PCI_CLOCK_CTRL,
1194 tp->pci_clock_ctrl | newbits3);
1199 tg3_frob_aux_power(tp);
1201 /* Workaround for unstable PLL clock */
1202 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1203 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1204 u32 val = tr32(0x7d00);
1206 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1208 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1209 tg3_halt_cpu(tp, RX_CPU_BASE);
1212 /* Finally, set the new power state. */
1213 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1214 udelay(100); /* Delay after power state change */
1216 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Log link state: "down", or speed (1000/100/10) and duplex plus the
 * negotiated TX/RX flow-control settings. Speed/duplex strings are on
 * lines missing from this extraction.
 */
1221 static void tg3_link_report(struct tg3 *tp)
1223 if (!netif_carrier_ok(tp->dev)) {
1224 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1226 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1228 (tp->link_config.active_speed == SPEED_1000 ?
1230 (tp->link_config.active_speed == SPEED_100 ?
1232 (tp->link_config.active_duplex == DUPLEX_FULL ?
1235 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1238 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1239 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/* Resolve pause (flow-control) configuration from our advertised and
 * the link partner's abilities per the 802.3 pause resolution rules
 * (symmetric vs. asymmetric), update TG3_FLAG_RX/TX_PAUSE, and apply
 * the result to the MAC RX/TX mode registers, writing only the
 * registers whose value actually changed.
 */
1243 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1245 u32 new_tg3_flags = 0;
1246 u32 old_rx_mode = tp->rx_mode;
1247 u32 old_tx_mode = tp->tx_mode;
1249 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1250 if (local_adv & ADVERTISE_PAUSE_CAP) {
1251 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1252 if (remote_adv & LPA_PAUSE_CAP)
1254 (TG3_FLAG_RX_PAUSE |
1256 else if (remote_adv & LPA_PAUSE_ASYM)
1258 (TG3_FLAG_RX_PAUSE);
1260 if (remote_adv & LPA_PAUSE_CAP)
1262 (TG3_FLAG_RX_PAUSE |
1265 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1266 if ((remote_adv & LPA_PAUSE_CAP) &&
1267 (remote_adv & LPA_PAUSE_ASYM))
1268 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1271 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1272 tp->tg3_flags |= new_tg3_flags;
1274 new_tg3_flags = tp->tg3_flags;
1277 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1278 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1280 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1282 if (old_rx_mode != tp->rx_mode) {
1283 tw32_f(MAC_RX_MODE, tp->rx_mode);
1286 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1287 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1289 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1291 if (old_tx_mode != tp->tx_mode) {
1292 tw32_f(MAC_TX_MODE, tp->tx_mode);
1296 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1298 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1299 case MII_TG3_AUX_STAT_10HALF:
1301 *duplex = DUPLEX_HALF;
1304 case MII_TG3_AUX_STAT_10FULL:
1306 *duplex = DUPLEX_FULL;
1309 case MII_TG3_AUX_STAT_100HALF:
1311 *duplex = DUPLEX_HALF;
1314 case MII_TG3_AUX_STAT_100FULL:
1316 *duplex = DUPLEX_FULL;
1319 case MII_TG3_AUX_STAT_1000HALF:
1320 *speed = SPEED_1000;
1321 *duplex = DUPLEX_HALF;
1324 case MII_TG3_AUX_STAT_1000FULL:
1325 *speed = SPEED_1000;
1326 *duplex = DUPLEX_FULL;
1330 *speed = SPEED_INVALID;
1331 *duplex = DUPLEX_INVALID;
1336 static void tg3_phy_copper_begin(struct tg3 *tp)
1341 if (tp->link_config.phy_is_low_power) {
1342 /* Entering low power mode. Disable gigabit and
1343 * 100baseT advertisements.
1345 tg3_writephy(tp, MII_TG3_CTRL, 0);
1347 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1348 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1349 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1350 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1352 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1353 } else if (tp->link_config.speed == SPEED_INVALID) {
1354 tp->link_config.advertising =
1355 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1356 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1357 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1358 ADVERTISED_Autoneg | ADVERTISED_MII);
1360 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1361 tp->link_config.advertising &=
1362 ~(ADVERTISED_1000baseT_Half |
1363 ADVERTISED_1000baseT_Full);
1365 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1366 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1367 new_adv |= ADVERTISE_10HALF;
1368 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1369 new_adv |= ADVERTISE_10FULL;
1370 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1371 new_adv |= ADVERTISE_100HALF;
1372 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1373 new_adv |= ADVERTISE_100FULL;
1374 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1376 if (tp->link_config.advertising &
1377 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1379 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1380 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1381 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1382 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1383 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1384 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1385 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1386 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1387 MII_TG3_CTRL_ENABLE_AS_MASTER);
1388 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1390 tg3_writephy(tp, MII_TG3_CTRL, 0);
1393 /* Asking for a specific link mode. */
1394 if (tp->link_config.speed == SPEED_1000) {
1395 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1396 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1398 if (tp->link_config.duplex == DUPLEX_FULL)
1399 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1401 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1402 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1403 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1404 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1405 MII_TG3_CTRL_ENABLE_AS_MASTER);
1406 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1408 tg3_writephy(tp, MII_TG3_CTRL, 0);
1410 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1411 if (tp->link_config.speed == SPEED_100) {
1412 if (tp->link_config.duplex == DUPLEX_FULL)
1413 new_adv |= ADVERTISE_100FULL;
1415 new_adv |= ADVERTISE_100HALF;
1417 if (tp->link_config.duplex == DUPLEX_FULL)
1418 new_adv |= ADVERTISE_10FULL;
1420 new_adv |= ADVERTISE_10HALF;
1422 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1426 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1427 tp->link_config.speed != SPEED_INVALID) {
1428 u32 bmcr, orig_bmcr;
1430 tp->link_config.active_speed = tp->link_config.speed;
1431 tp->link_config.active_duplex = tp->link_config.duplex;
1434 switch (tp->link_config.speed) {
1440 bmcr |= BMCR_SPEED100;
1444 bmcr |= TG3_BMCR_SPEED1000;
1448 if (tp->link_config.duplex == DUPLEX_FULL)
1449 bmcr |= BMCR_FULLDPLX;
1451 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1452 (bmcr != orig_bmcr)) {
1453 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1454 for (i = 0; i < 1500; i++) {
1458 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1459 tg3_readphy(tp, MII_BMSR, &tmp))
1461 if (!(tmp & BMSR_LSTATUS)) {
1466 tg3_writephy(tp, MII_BMCR, bmcr);
1470 tg3_writephy(tp, MII_BMCR,
1471 BMCR_ANENABLE | BMCR_ANRESTART);
1475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1479 /* Turn off tap power management. */
1480 /* Set Extended packet length bit */
1481 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1483 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1484 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1486 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1487 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1489 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1490 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1492 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1493 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1495 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1496 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1503 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1505 u32 adv_reg, all_mask;
1507 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1510 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1511 ADVERTISE_100HALF | ADVERTISE_100FULL);
1512 if ((adv_reg & all_mask) != all_mask)
1514 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1517 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1520 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1521 MII_TG3_CTRL_ADV_1000_FULL);
1522 if ((tg3_ctrl & all_mask) != all_mask)
1528 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1530 int current_link_up;
1539 (MAC_STATUS_SYNC_CHANGED |
1540 MAC_STATUS_CFG_CHANGED |
1541 MAC_STATUS_MI_COMPLETION |
1542 MAC_STATUS_LNKSTATE_CHANGED));
1545 tp->mi_mode = MAC_MI_MODE_BASE;
1546 tw32_f(MAC_MI_MODE, tp->mi_mode);
1549 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1551 /* Some third-party PHYs need to be reset on link going
1554 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1557 netif_carrier_ok(tp->dev)) {
1558 tg3_readphy(tp, MII_BMSR, &bmsr);
1559 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1560 !(bmsr & BMSR_LSTATUS))
1566 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1567 tg3_readphy(tp, MII_BMSR, &bmsr);
1568 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1569 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1572 if (!(bmsr & BMSR_LSTATUS)) {
1573 err = tg3_init_5401phy_dsp(tp);
1577 tg3_readphy(tp, MII_BMSR, &bmsr);
1578 for (i = 0; i < 1000; i++) {
1580 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1581 (bmsr & BMSR_LSTATUS)) {
1587 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1588 !(bmsr & BMSR_LSTATUS) &&
1589 tp->link_config.active_speed == SPEED_1000) {
1590 err = tg3_phy_reset(tp);
1592 err = tg3_init_5401phy_dsp(tp);
1597 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1598 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1599 /* 5701 {A0,B0} CRC bug workaround */
1600 tg3_writephy(tp, 0x15, 0x0a75);
1601 tg3_writephy(tp, 0x1c, 0x8c68);
1602 tg3_writephy(tp, 0x1c, 0x8d68);
1603 tg3_writephy(tp, 0x1c, 0x8c68);
1606 /* Clear pending interrupts... */
1607 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1608 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1610 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1611 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1613 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1617 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1618 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1619 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1621 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1624 current_link_up = 0;
1625 current_speed = SPEED_INVALID;
1626 current_duplex = DUPLEX_INVALID;
1628 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1631 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1632 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1633 if (!(val & (1 << 10))) {
1635 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1641 for (i = 0; i < 100; i++) {
1642 tg3_readphy(tp, MII_BMSR, &bmsr);
1643 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1644 (bmsr & BMSR_LSTATUS))
1649 if (bmsr & BMSR_LSTATUS) {
1652 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1653 for (i = 0; i < 2000; i++) {
1655 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1660 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1665 for (i = 0; i < 200; i++) {
1666 tg3_readphy(tp, MII_BMCR, &bmcr);
1667 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1669 if (bmcr && bmcr != 0x7fff)
1674 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1675 if (bmcr & BMCR_ANENABLE) {
1676 current_link_up = 1;
1678 /* Force autoneg restart if we are exiting
1681 if (!tg3_copper_is_advertising_all(tp))
1682 current_link_up = 0;
1684 current_link_up = 0;
1687 if (!(bmcr & BMCR_ANENABLE) &&
1688 tp->link_config.speed == current_speed &&
1689 tp->link_config.duplex == current_duplex) {
1690 current_link_up = 1;
1692 current_link_up = 0;
1696 tp->link_config.active_speed = current_speed;
1697 tp->link_config.active_duplex = current_duplex;
1700 if (current_link_up == 1 &&
1701 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1702 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1703 u32 local_adv, remote_adv;
1705 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1707 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1709 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1712 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1714 /* If we are not advertising full pause capability,
1715 * something is wrong. Bring the link down and reconfigure.
1717 if (local_adv != ADVERTISE_PAUSE_CAP) {
1718 current_link_up = 0;
1720 tg3_setup_flow_control(tp, local_adv, remote_adv);
1724 if (current_link_up == 0) {
1727 tg3_phy_copper_begin(tp);
1729 tg3_readphy(tp, MII_BMSR, &tmp);
1730 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1731 (tmp & BMSR_LSTATUS))
1732 current_link_up = 1;
1735 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1736 if (current_link_up == 1) {
1737 if (tp->link_config.active_speed == SPEED_100 ||
1738 tp->link_config.active_speed == SPEED_10)
1739 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1741 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1743 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1745 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1746 if (tp->link_config.active_duplex == DUPLEX_HALF)
1747 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1749 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1751 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1752 (current_link_up == 1 &&
1753 tp->link_config.active_speed == SPEED_10))
1754 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1756 if (current_link_up == 1)
1757 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1760 /* ??? Without this setting Netgear GA302T PHY does not
1761 * ??? send/receive packets...
1763 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1764 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1765 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1766 tw32_f(MAC_MI_MODE, tp->mi_mode);
1770 tw32_f(MAC_MODE, tp->mac_mode);
1773 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1774 /* Polled via timer. */
1775 tw32_f(MAC_EVENT, 0);
1777 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1782 current_link_up == 1 &&
1783 tp->link_config.active_speed == SPEED_1000 &&
1784 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1785 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1788 (MAC_STATUS_SYNC_CHANGED |
1789 MAC_STATUS_CFG_CHANGED));
1792 NIC_SRAM_FIRMWARE_MBOX,
1793 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1796 if (current_link_up != netif_carrier_ok(tp->dev)) {
1797 if (current_link_up)
1798 netif_carrier_on(tp->dev);
1800 netif_carrier_off(tp->dev);
1801 tg3_link_report(tp);
1807 struct tg3_fiber_aneginfo {
1809 #define ANEG_STATE_UNKNOWN 0
1810 #define ANEG_STATE_AN_ENABLE 1
1811 #define ANEG_STATE_RESTART_INIT 2
1812 #define ANEG_STATE_RESTART 3
1813 #define ANEG_STATE_DISABLE_LINK_OK 4
1814 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1815 #define ANEG_STATE_ABILITY_DETECT 6
1816 #define ANEG_STATE_ACK_DETECT_INIT 7
1817 #define ANEG_STATE_ACK_DETECT 8
1818 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1819 #define ANEG_STATE_COMPLETE_ACK 10
1820 #define ANEG_STATE_IDLE_DETECT_INIT 11
1821 #define ANEG_STATE_IDLE_DETECT 12
1822 #define ANEG_STATE_LINK_OK 13
1823 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1824 #define ANEG_STATE_NEXT_PAGE_WAIT 15
1827 #define MR_AN_ENABLE 0x00000001
1828 #define MR_RESTART_AN 0x00000002
1829 #define MR_AN_COMPLETE 0x00000004
1830 #define MR_PAGE_RX 0x00000008
1831 #define MR_NP_LOADED 0x00000010
1832 #define MR_TOGGLE_TX 0x00000020
1833 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1834 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1835 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1836 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1837 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1838 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1839 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1840 #define MR_TOGGLE_RX 0x00002000
1841 #define MR_NP_RX 0x00004000
1843 #define MR_LINK_OK 0x80000000
1845 unsigned long link_time, cur_time;
1847 u32 ability_match_cfg;
1848 int ability_match_count;
1850 char ability_match, idle_match, ack_match;
1852 u32 txconfig, rxconfig;
1853 #define ANEG_CFG_NP 0x00000080
1854 #define ANEG_CFG_ACK 0x00000040
1855 #define ANEG_CFG_RF2 0x00000020
1856 #define ANEG_CFG_RF1 0x00000010
1857 #define ANEG_CFG_PS2 0x00000001
1858 #define ANEG_CFG_PS1 0x00008000
1859 #define ANEG_CFG_HD 0x00004000
1860 #define ANEG_CFG_FD 0x00002000
1861 #define ANEG_CFG_INVAL 0x00001f06
1866 #define ANEG_TIMER_ENAB 2
1867 #define ANEG_FAILED -1
1869 #define ANEG_STATE_SETTLE_TIME 10000
1871 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1872 struct tg3_fiber_aneginfo *ap)
1874 unsigned long delta;
1878 if (ap->state == ANEG_STATE_UNKNOWN) {
1882 ap->ability_match_cfg = 0;
1883 ap->ability_match_count = 0;
1884 ap->ability_match = 0;
1890 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1891 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1893 if (rx_cfg_reg != ap->ability_match_cfg) {
1894 ap->ability_match_cfg = rx_cfg_reg;
1895 ap->ability_match = 0;
1896 ap->ability_match_count = 0;
1898 if (++ap->ability_match_count > 1) {
1899 ap->ability_match = 1;
1900 ap->ability_match_cfg = rx_cfg_reg;
1903 if (rx_cfg_reg & ANEG_CFG_ACK)
1911 ap->ability_match_cfg = 0;
1912 ap->ability_match_count = 0;
1913 ap->ability_match = 0;
1919 ap->rxconfig = rx_cfg_reg;
1923 case ANEG_STATE_UNKNOWN:
1924 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1925 ap->state = ANEG_STATE_AN_ENABLE;
1928 case ANEG_STATE_AN_ENABLE:
1929 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1930 if (ap->flags & MR_AN_ENABLE) {
1933 ap->ability_match_cfg = 0;
1934 ap->ability_match_count = 0;
1935 ap->ability_match = 0;
1939 ap->state = ANEG_STATE_RESTART_INIT;
1941 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1945 case ANEG_STATE_RESTART_INIT:
1946 ap->link_time = ap->cur_time;
1947 ap->flags &= ~(MR_NP_LOADED);
1949 tw32(MAC_TX_AUTO_NEG, 0);
1950 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1951 tw32_f(MAC_MODE, tp->mac_mode);
1954 ret = ANEG_TIMER_ENAB;
1955 ap->state = ANEG_STATE_RESTART;
1958 case ANEG_STATE_RESTART:
1959 delta = ap->cur_time - ap->link_time;
1960 if (delta > ANEG_STATE_SETTLE_TIME) {
1961 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1963 ret = ANEG_TIMER_ENAB;
1967 case ANEG_STATE_DISABLE_LINK_OK:
1971 case ANEG_STATE_ABILITY_DETECT_INIT:
1972 ap->flags &= ~(MR_TOGGLE_TX);
1973 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1974 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1975 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1976 tw32_f(MAC_MODE, tp->mac_mode);
1979 ap->state = ANEG_STATE_ABILITY_DETECT;
1982 case ANEG_STATE_ABILITY_DETECT:
1983 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1984 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1988 case ANEG_STATE_ACK_DETECT_INIT:
1989 ap->txconfig |= ANEG_CFG_ACK;
1990 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1991 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1992 tw32_f(MAC_MODE, tp->mac_mode);
1995 ap->state = ANEG_STATE_ACK_DETECT;
1998 case ANEG_STATE_ACK_DETECT:
1999 if (ap->ack_match != 0) {
2000 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2001 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2002 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2004 ap->state = ANEG_STATE_AN_ENABLE;
2006 } else if (ap->ability_match != 0 &&
2007 ap->rxconfig == 0) {
2008 ap->state = ANEG_STATE_AN_ENABLE;
2012 case ANEG_STATE_COMPLETE_ACK_INIT:
2013 if (ap->rxconfig & ANEG_CFG_INVAL) {
2017 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2018 MR_LP_ADV_HALF_DUPLEX |
2019 MR_LP_ADV_SYM_PAUSE |
2020 MR_LP_ADV_ASYM_PAUSE |
2021 MR_LP_ADV_REMOTE_FAULT1 |
2022 MR_LP_ADV_REMOTE_FAULT2 |
2023 MR_LP_ADV_NEXT_PAGE |
2026 if (ap->rxconfig & ANEG_CFG_FD)
2027 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2028 if (ap->rxconfig & ANEG_CFG_HD)
2029 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2030 if (ap->rxconfig & ANEG_CFG_PS1)
2031 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2032 if (ap->rxconfig & ANEG_CFG_PS2)
2033 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2034 if (ap->rxconfig & ANEG_CFG_RF1)
2035 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2036 if (ap->rxconfig & ANEG_CFG_RF2)
2037 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2038 if (ap->rxconfig & ANEG_CFG_NP)
2039 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2041 ap->link_time = ap->cur_time;
2043 ap->flags ^= (MR_TOGGLE_TX);
2044 if (ap->rxconfig & 0x0008)
2045 ap->flags |= MR_TOGGLE_RX;
2046 if (ap->rxconfig & ANEG_CFG_NP)
2047 ap->flags |= MR_NP_RX;
2048 ap->flags |= MR_PAGE_RX;
2050 ap->state = ANEG_STATE_COMPLETE_ACK;
2051 ret = ANEG_TIMER_ENAB;
2054 case ANEG_STATE_COMPLETE_ACK:
2055 if (ap->ability_match != 0 &&
2056 ap->rxconfig == 0) {
2057 ap->state = ANEG_STATE_AN_ENABLE;
2060 delta = ap->cur_time - ap->link_time;
2061 if (delta > ANEG_STATE_SETTLE_TIME) {
2062 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2063 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2065 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2066 !(ap->flags & MR_NP_RX)) {
2067 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2075 case ANEG_STATE_IDLE_DETECT_INIT:
2076 ap->link_time = ap->cur_time;
2077 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2078 tw32_f(MAC_MODE, tp->mac_mode);
2081 ap->state = ANEG_STATE_IDLE_DETECT;
2082 ret = ANEG_TIMER_ENAB;
2085 case ANEG_STATE_IDLE_DETECT:
2086 if (ap->ability_match != 0 &&
2087 ap->rxconfig == 0) {
2088 ap->state = ANEG_STATE_AN_ENABLE;
2091 delta = ap->cur_time - ap->link_time;
2092 if (delta > ANEG_STATE_SETTLE_TIME) {
2093 /* XXX another gem from the Broadcom driver :( */
2094 ap->state = ANEG_STATE_LINK_OK;
2098 case ANEG_STATE_LINK_OK:
2099 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2103 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2104 /* ??? unimplemented */
2107 case ANEG_STATE_NEXT_PAGE_WAIT:
2108 /* ??? unimplemented */
2119 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2122 struct tg3_fiber_aneginfo aninfo;
2123 int status = ANEG_FAILED;
2127 tw32_f(MAC_TX_AUTO_NEG, 0);
2129 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2130 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2133 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2136 memset(&aninfo, 0, sizeof(aninfo));
2137 aninfo.flags |= MR_AN_ENABLE;
2138 aninfo.state = ANEG_STATE_UNKNOWN;
2139 aninfo.cur_time = 0;
2141 while (++tick < 195000) {
2142 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2143 if (status == ANEG_DONE || status == ANEG_FAILED)
2149 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2150 tw32_f(MAC_MODE, tp->mac_mode);
2153 *flags = aninfo.flags;
2155 if (status == ANEG_DONE &&
2156 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2157 MR_LP_ADV_FULL_DUPLEX)))
2163 static void tg3_init_bcm8002(struct tg3 *tp)
2165 u32 mac_status = tr32(MAC_STATUS);
2168 /* Reset when initting first time or we have a link. */
2169 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2170 !(mac_status & MAC_STATUS_PCS_SYNCED))
2173 /* Set PLL lock range. */
2174 tg3_writephy(tp, 0x16, 0x8007);
2177 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2179 /* Wait for reset to complete. */
2180 /* XXX schedule_timeout() ... */
2181 for (i = 0; i < 500; i++)
2184 /* Config mode; select PMA/Ch 1 regs. */
2185 tg3_writephy(tp, 0x10, 0x8411);
2187 /* Enable auto-lock and comdet, select txclk for tx. */
2188 tg3_writephy(tp, 0x11, 0x0a10);
2190 tg3_writephy(tp, 0x18, 0x00a0);
2191 tg3_writephy(tp, 0x16, 0x41ff);
2193 /* Assert and deassert POR. */
2194 tg3_writephy(tp, 0x13, 0x0400);
2196 tg3_writephy(tp, 0x13, 0x0000);
2198 tg3_writephy(tp, 0x11, 0x0a50);
2200 tg3_writephy(tp, 0x11, 0x0a10);
2202 /* Wait for signal to stabilize */
2203 /* XXX schedule_timeout() ... */
2204 for (i = 0; i < 15000; i++)
2207 /* Deselect the channel register so we can read the PHYID
2210 tg3_writephy(tp, 0x10, 0x8011);
2213 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2215 u32 sg_dig_ctrl, sg_dig_status;
2216 u32 serdes_cfg, expected_sg_dig_ctrl;
2217 int workaround, port_a;
2218 int current_link_up;
2221 expected_sg_dig_ctrl = 0;
2224 current_link_up = 0;
2226 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2227 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2229 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2232 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2233 /* preserve bits 20-23 for voltage regulator */
2234 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2237 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2239 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2240 if (sg_dig_ctrl & (1 << 31)) {
2242 u32 val = serdes_cfg;
2248 tw32_f(MAC_SERDES_CFG, val);
2250 tw32_f(SG_DIG_CTRL, 0x01388400);
2252 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2253 tg3_setup_flow_control(tp, 0, 0);
2254 current_link_up = 1;
2259 /* Want auto-negotiation. */
2260 expected_sg_dig_ctrl = 0x81388400;
2262 /* Pause capability */
2263 expected_sg_dig_ctrl |= (1 << 11);
2265 /* Asymettric pause */
2266 expected_sg_dig_ctrl |= (1 << 12);
2268 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2270 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2271 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2273 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2275 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2276 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2277 MAC_STATUS_SIGNAL_DET)) {
2280 /* Giver time to negotiate (~200ms) */
2281 for (i = 0; i < 40000; i++) {
2282 sg_dig_status = tr32(SG_DIG_STATUS);
2283 if (sg_dig_status & (0x3))
2287 mac_status = tr32(MAC_STATUS);
2289 if ((sg_dig_status & (1 << 1)) &&
2290 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2291 u32 local_adv, remote_adv;
2293 local_adv = ADVERTISE_PAUSE_CAP;
2295 if (sg_dig_status & (1 << 19))
2296 remote_adv |= LPA_PAUSE_CAP;
2297 if (sg_dig_status & (1 << 20))
2298 remote_adv |= LPA_PAUSE_ASYM;
2300 tg3_setup_flow_control(tp, local_adv, remote_adv);
2301 current_link_up = 1;
2302 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2303 } else if (!(sg_dig_status & (1 << 1))) {
2304 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2305 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2308 u32 val = serdes_cfg;
2315 tw32_f(MAC_SERDES_CFG, val);
2318 tw32_f(SG_DIG_CTRL, 0x01388400);
2321 /* Link parallel detection - link is up */
2322 /* only if we have PCS_SYNC and not */
2323 /* receiving config code words */
2324 mac_status = tr32(MAC_STATUS);
2325 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2326 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2327 tg3_setup_flow_control(tp, 0, 0);
2328 current_link_up = 1;
2335 return current_link_up;
2338 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2340 int current_link_up = 0;
2342 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2343 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2347 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2351 if (fiber_autoneg(tp, &flags)) {
2352 u32 local_adv, remote_adv;
2354 local_adv = ADVERTISE_PAUSE_CAP;
2356 if (flags & MR_LP_ADV_SYM_PAUSE)
2357 remote_adv |= LPA_PAUSE_CAP;
2358 if (flags & MR_LP_ADV_ASYM_PAUSE)
2359 remote_adv |= LPA_PAUSE_ASYM;
2361 tg3_setup_flow_control(tp, local_adv, remote_adv);
2363 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2364 current_link_up = 1;
2366 for (i = 0; i < 30; i++) {
2369 (MAC_STATUS_SYNC_CHANGED |
2370 MAC_STATUS_CFG_CHANGED));
2372 if ((tr32(MAC_STATUS) &
2373 (MAC_STATUS_SYNC_CHANGED |
2374 MAC_STATUS_CFG_CHANGED)) == 0)
2378 mac_status = tr32(MAC_STATUS);
2379 if (current_link_up == 0 &&
2380 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2381 !(mac_status & MAC_STATUS_RCVD_CFG))
2382 current_link_up = 1;
2384 /* Forcing 1000FD link up. */
2385 current_link_up = 1;
2386 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2388 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2393 return current_link_up;
2396 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2399 u16 orig_active_speed;
2400 u8 orig_active_duplex;
2402 int current_link_up;
2406 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2407 TG3_FLAG_TX_PAUSE));
2408 orig_active_speed = tp->link_config.active_speed;
2409 orig_active_duplex = tp->link_config.active_duplex;
2411 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2412 netif_carrier_ok(tp->dev) &&
2413 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2414 mac_status = tr32(MAC_STATUS);
2415 mac_status &= (MAC_STATUS_PCS_SYNCED |
2416 MAC_STATUS_SIGNAL_DET |
2417 MAC_STATUS_CFG_CHANGED |
2418 MAC_STATUS_RCVD_CFG);
2419 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2420 MAC_STATUS_SIGNAL_DET)) {
2421 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2422 MAC_STATUS_CFG_CHANGED));
2427 tw32_f(MAC_TX_AUTO_NEG, 0);
2429 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2430 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2431 tw32_f(MAC_MODE, tp->mac_mode);
2434 if (tp->phy_id == PHY_ID_BCM8002)
2435 tg3_init_bcm8002(tp);
2437 /* Enable link change event even when serdes polling. */
2438 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2441 current_link_up = 0;
2442 mac_status = tr32(MAC_STATUS);
2444 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2445 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2447 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2449 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2450 tw32_f(MAC_MODE, tp->mac_mode);
2453 tp->hw_status->status =
2454 (SD_STATUS_UPDATED |
2455 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2457 for (i = 0; i < 100; i++) {
2458 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2459 MAC_STATUS_CFG_CHANGED));
2461 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2462 MAC_STATUS_CFG_CHANGED)) == 0)
2466 mac_status = tr32(MAC_STATUS);
2467 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2468 current_link_up = 0;
2469 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2470 tw32_f(MAC_MODE, (tp->mac_mode |
2471 MAC_MODE_SEND_CONFIGS));
2473 tw32_f(MAC_MODE, tp->mac_mode);
2477 if (current_link_up == 1) {
2478 tp->link_config.active_speed = SPEED_1000;
2479 tp->link_config.active_duplex = DUPLEX_FULL;
2480 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2481 LED_CTRL_LNKLED_OVERRIDE |
2482 LED_CTRL_1000MBPS_ON));
2484 tp->link_config.active_speed = SPEED_INVALID;
2485 tp->link_config.active_duplex = DUPLEX_INVALID;
2486 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2487 LED_CTRL_LNKLED_OVERRIDE |
2488 LED_CTRL_TRAFFIC_OVERRIDE));
2491 if (current_link_up != netif_carrier_ok(tp->dev)) {
2492 if (current_link_up)
2493 netif_carrier_on(tp->dev);
2495 netif_carrier_off(tp->dev);
2496 tg3_link_report(tp);
2499 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2501 if (orig_pause_cfg != now_pause_cfg ||
2502 orig_active_speed != tp->link_config.active_speed ||
2503 orig_active_duplex != tp->link_config.active_duplex)
2504 tg3_link_report(tp);
2510 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2514 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2515 err = tg3_setup_fiber_phy(tp, force_reset);
2517 err = tg3_setup_copper_phy(tp, force_reset);
2520 if (tp->link_config.active_speed == SPEED_1000 &&
2521 tp->link_config.active_duplex == DUPLEX_HALF)
2522 tw32(MAC_TX_LENGTHS,
2523 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2524 (6 << TX_LENGTHS_IPG_SHIFT) |
2525 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2527 tw32(MAC_TX_LENGTHS,
2528 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2529 (6 << TX_LENGTHS_IPG_SHIFT) |
2530 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2532 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2533 if (netif_carrier_ok(tp->dev)) {
2534 tw32(HOSTCC_STAT_COAL_TICKS,
2535 tp->coal.stats_block_coalesce_usecs);
2537 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2544 /* Tigon3 never reports partial packet sends. So we do not
2545 * need special logic to handle SKBs that have not had all
2546 * of their frags sent yet, like SunGEM does.
2548 static void tg3_tx(struct tg3 *tp)
2550 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2551 u32 sw_idx = tp->tx_cons;
2553 while (sw_idx != hw_idx) {
2554 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2555 struct sk_buff *skb = ri->skb;
2558 if (unlikely(skb == NULL))
2561 pci_unmap_single(tp->pdev,
2562 pci_unmap_addr(ri, mapping),
2568 sw_idx = NEXT_TX(sw_idx);
2570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571 if (unlikely(sw_idx == hw_idx))
2574 ri = &tp->tx_buffers[sw_idx];
2575 if (unlikely(ri->skb != NULL))
2578 pci_unmap_page(tp->pdev,
2579 pci_unmap_addr(ri, mapping),
2580 skb_shinfo(skb)->frags[i].size,
2583 sw_idx = NEXT_TX(sw_idx);
2589 tp->tx_cons = sw_idx;
2591 if (netif_queue_stopped(tp->dev) &&
2592 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2593 netif_wake_queue(tp->dev);
2596 /* Returns size of skb allocated or < 0 on error.
2598 * We only need to fill in the address because the other members
2599 * of the RX descriptor are invariant, see tg3_init_rings.
2601 * Note the purposeful assymetry of cpu vs. chip accesses. For
2602 * posting buffers we only dirty the first cache line of the RX
2603 * descriptor (containing the address). Whereas for the RX status
2604 * buffers the cpu only reads the last cacheline of the RX descriptor
2605 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2607 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2608 int src_idx, u32 dest_idx_unmasked)
2610 struct tg3_rx_buffer_desc *desc;
2611 struct ring_info *map, *src_map;
2612 struct sk_buff *skb;
2614 int skb_size, dest_idx;
2617 switch (opaque_key) {
2618 case RXD_OPAQUE_RING_STD:
2619 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2620 desc = &tp->rx_std[dest_idx];
2621 map = &tp->rx_std_buffers[dest_idx];
2623 src_map = &tp->rx_std_buffers[src_idx];
2624 skb_size = RX_PKT_BUF_SZ;
2627 case RXD_OPAQUE_RING_JUMBO:
2628 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2629 desc = &tp->rx_jumbo[dest_idx];
2630 map = &tp->rx_jumbo_buffers[dest_idx];
2632 src_map = &tp->rx_jumbo_buffers[src_idx];
2633 skb_size = RX_JUMBO_PKT_BUF_SZ;
2640 /* Do not overwrite any of the map or rp information
2641 * until we are sure we can commit to a new buffer.
2643 * Callers depend upon this behavior and assume that
2644 * we leave everything unchanged if we fail.
2646 skb = dev_alloc_skb(skb_size);
2651 skb_reserve(skb, tp->rx_offset);
2653 mapping = pci_map_single(tp->pdev, skb->data,
2654 skb_size - tp->rx_offset,
2655 PCI_DMA_FROMDEVICE);
2658 pci_unmap_addr_set(map, mapping, mapping);
2660 if (src_map != NULL)
2661 src_map->skb = NULL;
2663 desc->addr_hi = ((u64)mapping >> 32);
2664 desc->addr_lo = ((u64)mapping & 0xffffffff);
2669 /* We only need to move over in the address because the other
2670 * members of the RX descriptor are invariant. See notes above
2671 * tg3_alloc_rx_skb for full details.
2673 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2674 int src_idx, u32 dest_idx_unmasked)
2676 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2677 struct ring_info *src_map, *dest_map;
2680 switch (opaque_key) {
2681 case RXD_OPAQUE_RING_STD:
2682 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2683 dest_desc = &tp->rx_std[dest_idx];
2684 dest_map = &tp->rx_std_buffers[dest_idx];
2685 src_desc = &tp->rx_std[src_idx];
2686 src_map = &tp->rx_std_buffers[src_idx];
2689 case RXD_OPAQUE_RING_JUMBO:
2690 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2691 dest_desc = &tp->rx_jumbo[dest_idx];
2692 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2693 src_desc = &tp->rx_jumbo[src_idx];
2694 src_map = &tp->rx_jumbo_buffers[src_idx];
2701 dest_map->skb = src_map->skb;
2702 pci_unmap_addr_set(dest_map, mapping,
2703 pci_unmap_addr(src_map, mapping));
2704 dest_desc->addr_hi = src_desc->addr_hi;
2705 dest_desc->addr_lo = src_desc->addr_lo;
2707 src_map->skb = NULL;
2710 #if TG3_VLAN_TAG_USED
2711 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2713 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2717 /* The RX ring scheme is composed of multiple rings which post fresh
2718 * buffers to the chip, and one special ring the chip uses to report
2719 * status back to the host.
2721 * The special ring reports the status of received packets to the
2722 * host. The chip does not write into the original descriptor the
2723 * RX buffer was obtained from. The chip simply takes the original
2724 * descriptor as provided by the host, updates the status and length
2725 * field, then writes this into the next status ring entry.
2727 * Each ring the host uses to post buffers to the chip is described
2728 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
2729 * it is first placed into the on-chip ram. When the packet's length
2730 * is known, it walks down the TG3_BDINFO entries to select the ring.
2731 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2732 * which is within the range of the new packet's length is chosen.
2734 * The "separate ring for rx status" scheme may sound queer, but it makes
2735 * sense from a cache coherency perspective. If only the host writes
2736 * to the buffer post rings, and only the chip writes to the rx status
2737 * rings, then cache lines never move beyond shared-modified state.
2738 * If both the host and chip were to write into the same ring, cache line
2739 * eviction could occur since both entities want it in an exclusive state.
2741 static int tg3_rx(struct tg3 *tp, int budget)
2744 u32 sw_idx = tp->rx_rcb_ptr;
2748 hw_idx = tp->hw_status->idx[0].rx_producer;
2750 * We need to order the read of hw_idx and the read of
2751 * the opaque cookie.
2756 while (sw_idx != hw_idx && budget > 0) {
2757 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2759 struct sk_buff *skb;
2760 dma_addr_t dma_addr;
2761 u32 opaque_key, desc_idx, *post_ptr;
2763 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2764 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2765 if (opaque_key == RXD_OPAQUE_RING_STD) {
2766 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2768 skb = tp->rx_std_buffers[desc_idx].skb;
2769 post_ptr = &tp->rx_std_ptr;
2770 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2771 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2773 skb = tp->rx_jumbo_buffers[desc_idx].skb;
2774 post_ptr = &tp->rx_jumbo_ptr;
2777 goto next_pkt_nopost;
2780 work_mask |= opaque_key;
2782 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2783 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2785 tg3_recycle_rx(tp, opaque_key,
2786 desc_idx, *post_ptr);
2788 /* Other statistics kept track of by card. */
2789 tp->net_stats.rx_dropped++;
2793 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2795 if (len > RX_COPY_THRESHOLD
2796 && tp->rx_offset == 2
2797 /* rx_offset != 2 iff this is a 5701 card running
2798 * in PCI-X mode [see tg3_get_invariants()] */
2802 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2803 desc_idx, *post_ptr);
2807 pci_unmap_single(tp->pdev, dma_addr,
2808 skb_size - tp->rx_offset,
2809 PCI_DMA_FROMDEVICE);
2813 struct sk_buff *copy_skb;
2815 tg3_recycle_rx(tp, opaque_key,
2816 desc_idx, *post_ptr);
2818 copy_skb = dev_alloc_skb(len + 2);
2819 if (copy_skb == NULL)
2820 goto drop_it_no_recycle;
2822 copy_skb->dev = tp->dev;
2823 skb_reserve(copy_skb, 2);
2824 skb_put(copy_skb, len);
2825 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2826 memcpy(copy_skb->data, skb->data, len);
2827 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2829 /* We'll reuse the original ring buffer. */
2833 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2834 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2835 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2836 >> RXD_TCPCSUM_SHIFT) == 0xffff))
2837 skb->ip_summed = CHECKSUM_UNNECESSARY;
2839 skb->ip_summed = CHECKSUM_NONE;
2841 skb->protocol = eth_type_trans(skb, tp->dev);
2842 #if TG3_VLAN_TAG_USED
2843 if (tp->vlgrp != NULL &&
2844 desc->type_flags & RXD_FLAG_VLAN) {
2845 tg3_vlan_rx(tp, skb,
2846 desc->err_vlan & RXD_VLAN_MASK);
2849 netif_receive_skb(skb);
2851 tp->dev->last_rx = jiffies;
2859 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2861 /* Refresh hw_idx to see if there is new work */
2862 if (sw_idx == hw_idx) {
2863 hw_idx = tp->hw_status->idx[0].rx_producer;
2868 /* ACK the status ring. */
2869 tp->rx_rcb_ptr = sw_idx;
2870 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2872 /* Refill RX ring(s). */
2873 if (work_mask & RXD_OPAQUE_RING_STD) {
2874 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2875 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2878 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2879 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2880 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2888 static int tg3_poll(struct net_device *netdev, int *budget)
2890 struct tg3 *tp = netdev_priv(netdev);
2891 struct tg3_hw_status *sblk = tp->hw_status;
2894 /* handle link change and other phy events */
2895 if (!(tp->tg3_flags &
2896 (TG3_FLAG_USE_LINKCHG_REG |
2897 TG3_FLAG_POLL_SERDES))) {
2898 if (sblk->status & SD_STATUS_LINK_CHG) {
2899 sblk->status = SD_STATUS_UPDATED |
2900 (sblk->status & ~SD_STATUS_LINK_CHG);
2901 spin_lock(&tp->lock);
2902 tg3_setup_phy(tp, 0);
2903 spin_unlock(&tp->lock);
2907 /* run TX completion thread */
2908 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2909 spin_lock(&tp->tx_lock);
2911 spin_unlock(&tp->tx_lock);
2914 /* run RX thread, within the bounds set by NAPI.
2915 * All RX "locking" is done by ensuring outside
2916 * code synchronizes with dev->poll()
2918 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2919 int orig_budget = *budget;
2922 if (orig_budget > netdev->quota)
2923 orig_budget = netdev->quota;
2925 work_done = tg3_rx(tp, orig_budget);
2927 *budget -= work_done;
2928 netdev->quota -= work_done;
2931 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
2932 tp->last_tag = sblk->status_tag;
2934 sblk->status &= ~SD_STATUS_UPDATED;
2936 /* if no more work, tell net stack and NIC we're done */
2937 done = !tg3_has_work(tp);
2939 spin_lock(&tp->lock);
2940 netif_rx_complete(netdev);
2941 tg3_restart_ints(tp);
2942 spin_unlock(&tp->lock);
2945 return (done ? 0 : 1);
2948 static void tg3_irq_quiesce(struct tg3 *tp)
2950 BUG_ON(tp->irq_sync);
2955 synchronize_irq(tp->pdev->irq);
2958 static inline int tg3_irq_sync(struct tg3 *tp)
2960 return tp->irq_sync;
2963 /* Fully shutdown all tg3 driver activity elsewhere in the system.
2964 * If irq_sync is non-zero, then the IRQ handler must be synchronized
2965 * with as well. Most of the time, this is not necessary except when
2966 * shutting down the device.
2968 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
2971 tg3_irq_quiesce(tp);
2972 spin_lock_bh(&tp->lock);
2973 spin_lock(&tp->tx_lock);
2976 static inline void tg3_full_unlock(struct tg3 *tp)
2978 spin_unlock(&tp->tx_lock);
2979 spin_unlock_bh(&tp->lock);
2982 /* MSI ISR - No need to check for interrupt sharing and no need to
2983 * flush status block and interrupt mailbox. PCI ordering rules
2984 * guarantee that MSI will arrive after the status block.
2986 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2988 struct net_device *dev = dev_id;
2989 struct tg3 *tp = netdev_priv(dev);
2990 struct tg3_hw_status *sblk = tp->hw_status;
2993 * Writing any value to intr-mbox-0 clears PCI INTA# and
2994 * chip-internal interrupt pending events.
2995 * Writing non-zero to intr-mbox-0 additional tells the
2996 * NIC to stop sending us irqs, engaging "in-intr-handler"
2999 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3000 tp->last_tag = sblk->status_tag;
3002 if (tg3_irq_sync(tp))
3004 sblk->status &= ~SD_STATUS_UPDATED;
3005 if (likely(tg3_has_work(tp)))
3006 netif_rx_schedule(dev); /* schedule NAPI poll */
3008 /* No work, re-enable interrupts. */
3009 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3010 tp->last_tag << 24);
3013 return IRQ_RETVAL(1);
3016 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3018 struct net_device *dev = dev_id;
3019 struct tg3 *tp = netdev_priv(dev);
3020 struct tg3_hw_status *sblk = tp->hw_status;
3021 unsigned int handled = 1;
3023 /* In INTx mode, it is possible for the interrupt to arrive at
3024 * the CPU before the status block posted prior to the interrupt.
3025 * Reading the PCI State register will confirm whether the
3026 * interrupt is ours and will flush the status block.
3028 if ((sblk->status & SD_STATUS_UPDATED) ||
3029 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3031 * Writing any value to intr-mbox-0 clears PCI INTA# and
3032 * chip-internal interrupt pending events.
3033 * Writing non-zero to intr-mbox-0 additional tells the
3034 * NIC to stop sending us irqs, engaging "in-intr-handler"
3037 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3039 if (tg3_irq_sync(tp))
3041 sblk->status &= ~SD_STATUS_UPDATED;
3042 if (likely(tg3_has_work(tp)))
3043 netif_rx_schedule(dev); /* schedule NAPI poll */
3045 /* No work, shared interrupt perhaps? re-enable
3046 * interrupts, and flush that PCI write
3048 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3050 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3052 } else { /* shared interrupt */
3056 return IRQ_RETVAL(handled);
3059 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3061 struct net_device *dev = dev_id;
3062 struct tg3 *tp = netdev_priv(dev);
3063 struct tg3_hw_status *sblk = tp->hw_status;
3064 unsigned int handled = 1;
3066 /* In INTx mode, it is possible for the interrupt to arrive at
3067 * the CPU before the status block posted prior to the interrupt.
3068 * Reading the PCI State register will confirm whether the
3069 * interrupt is ours and will flush the status block.
3071 if ((sblk->status & SD_STATUS_UPDATED) ||
3072 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3074 * writing any value to intr-mbox-0 clears PCI INTA# and
3075 * chip-internal interrupt pending events.
3076 * writing non-zero to intr-mbox-0 additional tells the
3077 * NIC to stop sending us irqs, engaging "in-intr-handler"
3080 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3082 tp->last_tag = sblk->status_tag;
3084 if (tg3_irq_sync(tp))
3086 sblk->status &= ~SD_STATUS_UPDATED;
3087 if (likely(tg3_has_work(tp)))
3088 netif_rx_schedule(dev); /* schedule NAPI poll */
3090 /* no work, shared interrupt perhaps? re-enable
3091 * interrupts, and flush that PCI write
3093 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3094 tp->last_tag << 24);
3095 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3097 } else { /* shared interrupt */
3101 return IRQ_RETVAL(handled);
3104 /* ISR for interrupt test */
3105 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3106 struct pt_regs *regs)
3108 struct net_device *dev = dev_id;
3109 struct tg3 *tp = netdev_priv(dev);
3110 struct tg3_hw_status *sblk = tp->hw_status;
3112 if (sblk->status & SD_STATUS_UPDATED) {
3113 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3115 return IRQ_RETVAL(1);
3117 return IRQ_RETVAL(0);
3120 static int tg3_init_hw(struct tg3 *);
3121 static int tg3_halt(struct tg3 *, int, int);
3123 #ifdef CONFIG_NET_POLL_CONTROLLER
3124 static void tg3_poll_controller(struct net_device *dev)
3126 struct tg3 *tp = netdev_priv(dev);
3128 tg3_interrupt(tp->pdev->irq, dev, NULL);
3132 static void tg3_reset_task(void *_data)
3134 struct tg3 *tp = _data;
3135 unsigned int restart_timer;
3139 tg3_full_lock(tp, 1);
3141 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3142 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3144 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3147 tg3_netif_start(tp);
3149 tg3_full_unlock(tp);
3152 mod_timer(&tp->timer, jiffies + 1);
3155 static void tg3_tx_timeout(struct net_device *dev)
3157 struct tg3 *tp = netdev_priv(dev);
3159 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3162 schedule_work(&tp->reset_task);
3165 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3167 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3168 u32 guilty_entry, int guilty_len,
3169 u32 last_plus_one, u32 *start, u32 mss)
3171 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3172 dma_addr_t new_addr;
3181 /* New SKB is guaranteed to be linear. */
3183 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3185 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3186 (skb->ip_summed == CHECKSUM_HW) ?
3187 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3188 *start = NEXT_TX(entry);
3190 /* Now clean up the sw ring entries. */
3192 while (entry != last_plus_one) {
3196 len = skb_headlen(skb);
3198 len = skb_shinfo(skb)->frags[i-1].size;
3199 pci_unmap_single(tp->pdev,
3200 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3201 len, PCI_DMA_TODEVICE);
3203 tp->tx_buffers[entry].skb = new_skb;
3204 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3206 tp->tx_buffers[entry].skb = NULL;
3208 entry = NEXT_TX(entry);
3217 static void tg3_set_txd(struct tg3 *tp, int entry,
3218 dma_addr_t mapping, int len, u32 flags,
3221 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3222 int is_end = (mss_and_is_end & 0x1);
3223 u32 mss = (mss_and_is_end >> 1);
3227 flags |= TXD_FLAG_END;
3228 if (flags & TXD_FLAG_VLAN) {
3229 vlan_tag = flags >> 16;
3232 vlan_tag |= (mss << TXD_MSS_SHIFT);
3234 txd->addr_hi = ((u64) mapping >> 32);
3235 txd->addr_lo = ((u64) mapping & 0xffffffff);
3236 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3237 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3240 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3242 u32 base = (u32) mapping & 0xffffffff;
3244 return ((base > 0xffffdcc0) &&
3245 (base + len + 8 < base));
3248 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3250 struct tg3 *tp = netdev_priv(dev);
3253 u32 len, entry, base_flags, mss;
3254 int would_hit_hwbug;
3256 len = skb_headlen(skb);
3258 /* No BH disabling for tx_lock here. We are running in BH disabled
3259 * context and TX reclaim runs via tp->poll inside of a software
3260 * interrupt. Furthermore, IRQ processing runs lockless so we have
3261 * no IRQ context deadlocks to worry about either. Rejoice!
3263 if (!spin_trylock(&tp->tx_lock))
3264 return NETDEV_TX_LOCKED;
3266 /* This is a hard error, log it. */
3267 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3268 netif_stop_queue(dev);
3269 spin_unlock(&tp->tx_lock);
3270 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3272 return NETDEV_TX_BUSY;
3275 entry = tp->tx_prod;
3277 if (skb->ip_summed == CHECKSUM_HW)
3278 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3279 #if TG3_TSO_SUPPORT != 0
3281 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3282 (mss = skb_shinfo(skb)->tso_size) != 0) {
3283 int tcp_opt_len, ip_tcp_len;
3285 if (skb_header_cloned(skb) &&
3286 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3291 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3292 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3294 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3295 TXD_FLAG_CPU_POST_DMA);
3297 skb->nh.iph->check = 0;
3298 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3299 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3300 skb->h.th->check = 0;
3301 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3305 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3310 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3311 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3312 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3315 tsflags = ((skb->nh.iph->ihl - 5) +
3316 (tcp_opt_len >> 2));
3317 mss |= (tsflags << 11);
3320 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3323 tsflags = ((skb->nh.iph->ihl - 5) +
3324 (tcp_opt_len >> 2));
3325 base_flags |= tsflags << 12;
3332 #if TG3_VLAN_TAG_USED
3333 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3334 base_flags |= (TXD_FLAG_VLAN |
3335 (vlan_tx_tag_get(skb) << 16));
3338 /* Queue skb data, a.k.a. the main skb fragment. */
3339 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3341 tp->tx_buffers[entry].skb = skb;
3342 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3344 would_hit_hwbug = 0;
3346 if (tg3_4g_overflow_test(mapping, len))
3347 would_hit_hwbug = entry + 1;
3349 tg3_set_txd(tp, entry, mapping, len, base_flags,
3350 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3352 entry = NEXT_TX(entry);
3354 /* Now loop through additional data fragments, and queue them. */
3355 if (skb_shinfo(skb)->nr_frags > 0) {
3356 unsigned int i, last;
3358 last = skb_shinfo(skb)->nr_frags - 1;
3359 for (i = 0; i <= last; i++) {
3360 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3363 mapping = pci_map_page(tp->pdev,
3366 len, PCI_DMA_TODEVICE);
3368 tp->tx_buffers[entry].skb = NULL;
3369 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3371 if (tg3_4g_overflow_test(mapping, len)) {
3372 /* Only one should match. */
3373 if (would_hit_hwbug)
3375 would_hit_hwbug = entry + 1;
3378 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3379 tg3_set_txd(tp, entry, mapping, len,
3380 base_flags, (i == last)|(mss << 1));
3382 tg3_set_txd(tp, entry, mapping, len,
3383 base_flags, (i == last));
3385 entry = NEXT_TX(entry);
3389 if (would_hit_hwbug) {
3390 u32 last_plus_one = entry;
3392 unsigned int len = 0;
3394 would_hit_hwbug -= 1;
3395 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3396 entry &= (TG3_TX_RING_SIZE - 1);
3399 while (entry != last_plus_one) {
3401 len = skb_headlen(skb);
3403 len = skb_shinfo(skb)->frags[i-1].size;
3405 if (entry == would_hit_hwbug)
3409 entry = NEXT_TX(entry);
3413 /* If the workaround fails due to memory/mapping
3414 * failure, silently drop this packet.
3416 if (tigon3_4gb_hwbug_workaround(tp, skb,
3425 /* Packets are ready, update Tx producer idx local and on card. */
3426 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3428 tp->tx_prod = entry;
3429 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3430 netif_stop_queue(dev);
3434 spin_unlock(&tp->tx_lock);
3436 dev->trans_start = jiffies;
3438 return NETDEV_TX_OK;
3441 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3446 if (new_mtu > ETH_DATA_LEN)
3447 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3449 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3452 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3454 struct tg3 *tp = netdev_priv(dev);
3456 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3459 if (!netif_running(dev)) {
3460 /* We'll just catch it later when the
3463 tg3_set_mtu(dev, tp, new_mtu);
3469 tg3_full_lock(tp, 1);
3471 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3473 tg3_set_mtu(dev, tp, new_mtu);
3477 tg3_netif_start(tp);
3479 tg3_full_unlock(tp);
3484 /* Free up pending packets in all rx/tx rings.
3486 * The chip has been shut down and the driver detached from
3487 * the networking, so no interrupts or new tx packets will
3488 * end up in the driver. tp->{tx,}lock is not held and we are not
3489 * in an interrupt context and thus may sleep.
3491 static void tg3_free_rings(struct tg3 *tp)
3493 struct ring_info *rxp;
3496 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3497 rxp = &tp->rx_std_buffers[i];
3499 if (rxp->skb == NULL)
3501 pci_unmap_single(tp->pdev,
3502 pci_unmap_addr(rxp, mapping),
3503 RX_PKT_BUF_SZ - tp->rx_offset,
3504 PCI_DMA_FROMDEVICE);
3505 dev_kfree_skb_any(rxp->skb);
3509 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3510 rxp = &tp->rx_jumbo_buffers[i];
3512 if (rxp->skb == NULL)
3514 pci_unmap_single(tp->pdev,
3515 pci_unmap_addr(rxp, mapping),
3516 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3517 PCI_DMA_FROMDEVICE);
3518 dev_kfree_skb_any(rxp->skb);
3522 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3523 struct tx_ring_info *txp;
3524 struct sk_buff *skb;
3527 txp = &tp->tx_buffers[i];
3535 pci_unmap_single(tp->pdev,
3536 pci_unmap_addr(txp, mapping),
3543 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3544 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3545 pci_unmap_page(tp->pdev,
3546 pci_unmap_addr(txp, mapping),
3547 skb_shinfo(skb)->frags[j].size,
3552 dev_kfree_skb_any(skb);
3556 /* Initialize tx/rx rings for packet processing.
3558 * The chip has been shut down and the driver detached from
3559 * the networking, so no interrupts or new tx packets will
3560 * end up in the driver. tp->{tx,}lock are held and thus
3563 static void tg3_init_rings(struct tg3 *tp)
3567 /* Free up all the SKBs. */
3570 /* Zero out all descriptors. */
3571 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3572 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3573 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3574 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3576 /* Initialize invariants of the rings, we only set this
3577 * stuff once. This works because the card does not
3578 * write into the rx buffer posting rings.
3580 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3581 struct tg3_rx_buffer_desc *rxd;
3583 rxd = &tp->rx_std[i];
3584 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3586 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3587 rxd->opaque = (RXD_OPAQUE_RING_STD |
3588 (i << RXD_OPAQUE_INDEX_SHIFT));
3591 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3592 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3593 struct tg3_rx_buffer_desc *rxd;
3595 rxd = &tp->rx_jumbo[i];
3596 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3598 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3600 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3601 (i << RXD_OPAQUE_INDEX_SHIFT));
3605 /* Now allocate fresh SKBs for each rx ring. */
3606 for (i = 0; i < tp->rx_pending; i++) {
3607 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3612 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3613 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3614 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3622 * Must not be invoked with interrupt sources disabled and
3623 * the hardware shutdown down.
3625 static void tg3_free_consistent(struct tg3 *tp)
3627 if (tp->rx_std_buffers) {
3628 kfree(tp->rx_std_buffers);
3629 tp->rx_std_buffers = NULL;
3632 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3633 tp->rx_std, tp->rx_std_mapping);
3637 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3638 tp->rx_jumbo, tp->rx_jumbo_mapping);
3639 tp->rx_jumbo = NULL;
3642 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3643 tp->rx_rcb, tp->rx_rcb_mapping);
3647 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3648 tp->tx_ring, tp->tx_desc_mapping);
3651 if (tp->hw_status) {
3652 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3653 tp->hw_status, tp->status_mapping);
3654 tp->hw_status = NULL;
3657 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3658 tp->hw_stats, tp->stats_mapping);
3659 tp->hw_stats = NULL;
3664 * Must not be invoked with interrupt sources disabled and
3665 * the hardware shutdown down. Can sleep.
3667 static int tg3_alloc_consistent(struct tg3 *tp)
3669 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3671 TG3_RX_JUMBO_RING_SIZE)) +
3672 (sizeof(struct tx_ring_info) *
3675 if (!tp->rx_std_buffers)
3678 memset(tp->rx_std_buffers, 0,
3679 (sizeof(struct ring_info) *
3681 TG3_RX_JUMBO_RING_SIZE)) +
3682 (sizeof(struct tx_ring_info) *
3685 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3686 tp->tx_buffers = (struct tx_ring_info *)
3687 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3689 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3690 &tp->rx_std_mapping);
3694 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3695 &tp->rx_jumbo_mapping);
3700 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3701 &tp->rx_rcb_mapping);
3705 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3706 &tp->tx_desc_mapping);
3710 tp->hw_status = pci_alloc_consistent(tp->pdev,
3712 &tp->status_mapping);
3716 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3717 sizeof(struct tg3_hw_stats),
3718 &tp->stats_mapping);
3722 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3723 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3728 tg3_free_consistent(tp);
3732 #define MAX_WAIT_CNT 1000
3734 /* To stop a block, clear the enable bit and poll till it
3735 * clears. tp->lock is held.
3737 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3742 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3749 /* We can't enable/disable these bits of the
3750 * 5705/5750, just say success.
3763 for (i = 0; i < MAX_WAIT_CNT; i++) {
3766 if ((val & enable_bit) == 0)
3770 if (i == MAX_WAIT_CNT && !silent) {
3771 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3772 "ofs=%lx enable_bit=%x\n",
3780 /* tp->lock is held. */
3781 static int tg3_abort_hw(struct tg3 *tp, int silent)
3785 tg3_disable_ints(tp);
3787 tp->rx_mode &= ~RX_MODE_ENABLE;
3788 tw32_f(MAC_RX_MODE, tp->rx_mode);
3791 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
3792 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
3793 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
3794 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
3795 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
3796 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
3798 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
3799 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
3800 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
3801 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
3802 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
3803 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
3804 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
3806 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3807 tw32_f(MAC_MODE, tp->mac_mode);
3810 tp->tx_mode &= ~TX_MODE_ENABLE;
3811 tw32_f(MAC_TX_MODE, tp->tx_mode);
3813 for (i = 0; i < MAX_WAIT_CNT; i++) {
3815 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3818 if (i >= MAX_WAIT_CNT) {
3819 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3820 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3821 tp->dev->name, tr32(MAC_TX_MODE));
3825 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
3826 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
3827 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
3829 tw32(FTQ_RESET, 0xffffffff);
3830 tw32(FTQ_RESET, 0x00000000);
3832 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
3833 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
3836 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3838 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3843 /* tp->lock is held. */
3844 static int tg3_nvram_lock(struct tg3 *tp)
3846 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3849 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3850 for (i = 0; i < 8000; i++) {
3851 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3861 /* tp->lock is held. */
3862 static void tg3_nvram_unlock(struct tg3 *tp)
3864 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3865 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3868 /* tp->lock is held. */
3869 static void tg3_enable_nvram_access(struct tg3 *tp)
3871 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3872 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3873 u32 nvaccess = tr32(NVRAM_ACCESS);
3875 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3879 /* tp->lock is held. */
3880 static void tg3_disable_nvram_access(struct tg3 *tp)
3882 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3883 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3884 u32 nvaccess = tr32(NVRAM_ACCESS);
3886 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3890 /* tp->lock is held. */
3891 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3893 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3894 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3895 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3897 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3899 case RESET_KIND_INIT:
3900 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3904 case RESET_KIND_SHUTDOWN:
3905 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3909 case RESET_KIND_SUSPEND:
3910 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3920 /* tp->lock is held. */
3921 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3923 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3925 case RESET_KIND_INIT:
3926 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3927 DRV_STATE_START_DONE);
3930 case RESET_KIND_SHUTDOWN:
3931 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3932 DRV_STATE_UNLOAD_DONE);
3941 /* tp->lock is held. */
3942 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3944 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3946 case RESET_KIND_INIT:
3947 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3951 case RESET_KIND_SHUTDOWN:
3952 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3956 case RESET_KIND_SUSPEND:
3957 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3967 static void tg3_stop_fw(struct tg3 *);
3969 /* tp->lock is held. */
/* Perform a full core-clock reset of the chip and bring it back to a
 * state where register/memory accesses work again:
 *   1. issue GRC_MISC_CFG_CORECLK_RESET (with workarounds for PCIE and
 *      5701 write-bug parts),
 *   2. restore PCI config space clobbered by the reset,
 *   3. re-enable indirect accesses, MSI, memory arbiter and GRC mode,
 *   4. wait for bootcode to signal completion, then re-probe ASF state.
 * Returns 0 on success.  NOTE(review): local declarations, several
 * braces and udelay/break lines fall in gaps of this listing.
 */
3970 static int tg3_chip_reset(struct tg3 *tp)
3976 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3980 * We must avoid the readl() that normally takes place.
3981 * It locks machines, causes machine checks, and other
3982 * fun things. So, temporarily disable the 5701
3983 * hardware workaround, while we do the reset.
3985 flags_save = tp->tg3_flags;
3986 tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
/* Build the reset command for GRC_MISC_CFG. */
3989 val = GRC_MISC_CFG_CORECLK_RESET;
3991 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
/* NOTE(review): 0x7e2c == 0x60 appears to probe a PCIe core
 * revision before choosing the reset method — confirm against
 * Broadcom documentation.
 */
3992 if (tr32(0x7e2c) == 0x60) {
3995 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3996 tw32(GRC_MISC_CFG, (1 << 29));
/* 5705+ parts must keep the GPHY powered across the reset. */
4001 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4002 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4003 tw32(GRC_MISC_CFG, val);
4005 /* restore 5701 hardware bug workaround flag */
4006 tp->tg3_flags = flags_save;
4008 /* Unfortunately, we have to delay before the PCI read back.
4009 * Some 575X chips even will not respond to a PCI cfg access
4010 * when the reset command is given to the chip.
4012 * How do these hardware designers expect things to work
4013 * properly if the PCI write is posted for a long period
4014 * of time? It is always necessary to have some method by
4015 * which a register read back can occur to push the write
4016 * out which does the reset.
4018 * For most tg3 variants the trick below was working.
4023 /* Flush PCI posted writes. The normal MMIO registers
4024 * are inaccessible at this time so this is the only
4025 * way to make this reliably (actually, this is no longer
4026 * the case, see above). I tried to use indirect
4027 * register read/write but this upset some 5701 variants.
4029 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4033 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4034 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4038 /* Wait for link training to complete. */
4039 for (i = 0; i < 5000; i++)
/* NOTE(review): config offsets 0xc4 and 0xd8 are chip-specific
 * PCIe registers; bit 15 of 0xc4 and the 0xf5000 payload value
 * are undocumented here — confirm against the hardware manual.
 */
4042 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4043 pci_write_config_dword(tp->pdev, 0xc4,
4044 cfg_val | (1 << 15));
4046 /* Set PCIE max payload size and clear error status. */
4047 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4050 /* Re-enable indirect register accesses. */
4051 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4052 tp->misc_host_ctrl);
4054 /* Set MAX PCI retry to zero. */
4055 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
/* 5704 A0 in PCI-X mode additionally needs the retry-same-DMA bit. */
4056 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4057 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4058 val |= PCISTATE_RETRY_SAME_DMA;
4059 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Restore the config space saved before the reset. */
4061 pci_restore_state(tp->pdev);
4063 /* Make sure PCI-X relaxed ordering bit is clear. */
4064 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4065 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4066 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4068 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4071 /* Chip reset on 5780 will reset MSI enable bit,
4072 * so need to restore it.
4074 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4077 pci_read_config_word(tp->pdev,
4078 tp->msi_cap + PCI_MSI_FLAGS,
4080 pci_write_config_word(tp->pdev,
4081 tp->msi_cap + PCI_MSI_FLAGS,
4082 ctrl | PCI_MSI_FLAGS_ENABLE);
4083 val = tr32(MSGINT_MODE);
4084 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
/* Re-enable the memory arbiter (read-modify-write on 5780 path,
 * plain enable on the other path below).
 */
4087 val = tr32(MEMARB_MODE);
4088 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4091 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4093 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
/* NOTE(review): magic write for 5750 A3 — meaning of register
 * 0x5000 / value 0x400 is not documented here.
 */
4095 tw32(0x5000, 0x400);
4098 tw32(GRC_MODE, tp->grc_mode);
4100 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4101 u32 val = tr32(0xc4);
4103 tw32(0xc4, val | (1 << 15));
/* Mini-PCI cards on 5705 need CLKRUN output enabled (and A0 parts
 * need it forced on).
 */
4106 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4108 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4109 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4110 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4111 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Restore MAC mode: TBI for SerDes PHYs, otherwise cleared. */
4114 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4115 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4116 tw32_f(MAC_MODE, tp->mac_mode);
4118 tw32_f(MAC_MODE, 0);
4121 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4122 /* Wait for firmware initialization to complete. */
4123 for (i = 0; i < 100000; i++) {
4124 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4125 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Bootcode never posted the magic completion value. */
4130 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4131 "firmware will not restart magic=%08x\n",
4132 tp->dev->name, val);
4137 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4138 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
/* NOTE(review): another undocumented PCIe tweak (reg 0x7c00
 * bit 25) — confirm semantics before touching.
 */
4139 u32 val = tr32(0x7c00);
4141 tw32(0x7c00, val | (1 << 25));
4144 /* Reprobe ASF enable state. */
4145 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4146 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4147 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4148 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4152 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4153 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4154 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4155 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4162 /* tp->lock is held. */
/* Ask the on-chip ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in
 * the command mailbox, raise the RX CPU event (bit written via
 * GRC_RX_CPU_EVENT), then poll up to 100 times for the firmware to
 * clear bit 14 as its ACK.  No-op when ASF is disabled.
 * NOTE(review): local declarations, the event-bit OR, and the loop
 * delay/break fall in missing lines of this listing.
 */
4163 static void tg3_stop_fw(struct tg3 *tp)
4165 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4169 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4170 val = tr32(GRC_RX_CPU_EVENT);
4172 tw32(GRC_RX_CPU_EVENT, val);
4174 /* Wait for RX cpu to ACK the event. */
4175 for (i = 0; i < 100; i++) {
4176 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4183 /* tp->lock is held. */
/* Orderly chip shutdown/reset sequence: write the pre-reset signature,
 * abort active hardware (silently if requested), reset the chip, then
 * write the legacy and post-reset signatures.  Returns the result of
 * tg3_chip_reset() (error-return lines are in a listing gap).
 * @kind:   RESET_KIND_* value forwarded to the signature writers.
 * @silent: suppress tg3_abort_hw() error reporting.
 */
4184 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4190 tg3_write_sig_pre_reset(tp, kind);
4192 tg3_abort_hw(tp, silent);
4193 err = tg3_chip_reset(tp);
4195 tg3_write_sig_legacy(tp, kind);
4196 tg3_write_sig_post_reset(tp, kind);
/* Memory layout of the built-in 5701 RX-CPU firmware image: version,
 * load address, and per-segment base/length (text, rodata, data, sbss,
 * bss) used by tg3_load_5701_a0_firmware_fix().
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a typo for
 * "RELEASE"; kept as-is because other code may reference this name.
 */
4204 #define TG3_FW_RELEASE_MAJOR 0x0
4205 #define TG3_FW_RELASE_MINOR 0x0
4206 #define TG3_FW_RELEASE_FIX 0x0
4207 #define TG3_FW_START_ADDR 0x08000000
4208 #define TG3_FW_TEXT_ADDR 0x08000000
4209 #define TG3_FW_TEXT_LEN 0x9c0
4210 #define TG3_FW_RODATA_ADDR 0x080009c0
4211 #define TG3_FW_RODATA_LEN 0x60
4212 #define TG3_FW_DATA_ADDR 0x08000a40
4213 #define TG3_FW_DATA_LEN 0x20
4214 #define TG3_FW_SBSS_ADDR 0x08000a60
4215 #define TG3_FW_SBSS_LEN 0xc
4216 #define TG3_FW_BSS_ADDR 0x08000a70
4217 #define TG3_FW_BSS_LEN 0x10
/* Text (instruction-word) segment of the built-in firmware image,
 * loaded into the RX/TX CPU scratch memory by
 * tg3_load_5701_a0_firmware_fix() via tg3_load_firmware_cpu().
 * Opaque binary data — do not edit by hand.  NOTE(review): the words
 * appear to be MIPS machine code for the on-chip CPU — confirm against
 * the firmware build before assuming the ISA.
 */
4219 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4220 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4221 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4222 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4223 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4224 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4225 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4226 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4227 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4228 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4229 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4230 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4231 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4232 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4233 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4234 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4235 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4236 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4237 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4238 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4239 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4240 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4241 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4242 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4243 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4246 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4247 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4248 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4249 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4250 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4251 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4252 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4253 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4254 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4255 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4256 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4258 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4259 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4260 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4261 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4262 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4263 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4264 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4265 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4266 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4267 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4268 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4269 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4270 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4271 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4272 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4273 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4274 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4275 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4276 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4277 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4278 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4279 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4280 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4281 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4282 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4283 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4284 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4285 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4286 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4287 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4288 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4289 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4290 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4291 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4292 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4293 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4294 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4295 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4296 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4297 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4298 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4299 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4300 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4301 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4302 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4303 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4304 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4305 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4306 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4307 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4308 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4309 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4310 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
/* Read-only-data segment of the built-in firmware image (loaded at
 * TG3_FW_RODATA_ADDR by tg3_load_5701_a0_firmware_fix()).  The words
 * encode short ASCII strings (e.g. "5701", "SwEvent0", "fatalErr",
 * "MainCpuB") used by the firmware; treat as opaque.
 */
4313 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4314 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4315 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4316 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4317 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
/* Data segment of the firmware image is all zeros, so it is compiled
 * out; the loader passes data_data == NULL and writes zeros instead
 * (see tg3_load_5701_a0_firmware_fix() / tg3_load_firmware_cpu()).
 */
4321 #if 0 /* All zeros, don't eat up space with it. */
4322 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4323 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4324 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratch memory windows for the RX and TX embedded CPUs;
 * used as the load destination in tg3_load_firmware_cpu().
 */
4328 #define RX_CPU_SCRATCH_BASE 0x30000
4329 #define RX_CPU_SCRATCH_SIZE 0x04000
4330 #define TX_CPU_SCRATCH_BASE 0x34000
4331 #define TX_CPU_SCRATCH_SIZE 0x04000
4333 /* tp->lock is held. */
/* Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Repeatedly clears CPU_STATE and writes CPU_MODE_HALT
 * until the halt bit reads back set (up to 10000 iterations); the RX
 * CPU takes the first polling loop, other CPUs take the second after a
 * flushed halt write.  Logs an error on timeout.  NOTE(review): the
 * TX-CPU-on-5705 early return, break statements, and the error-return
 * path fall in missing lines of this listing.
 */
4334 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* 5705+ parts have no separate TX CPU to halt. */
4338 if (offset == TX_CPU_BASE &&
4339 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4342 if (offset == RX_CPU_BASE) {
4343 for (i = 0; i < 10000; i++) {
4344 tw32(offset + CPU_STATE, 0xffffffff);
4345 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4346 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Non-RX path: one flushed halt write, then poll. */
4350 tw32(offset + CPU_STATE, 0xffffffff);
4351 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4354 for (i = 0; i < 10000; i++) {
4355 tw32(offset + CPU_STATE, 0xffffffff);
4356 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4357 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4363 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4366 (offset == RX_CPU_BASE ? "RX" : "TX"));
/* (struct fw_info fields — the struct header line is missing from this
 * listing.)  Describes one firmware image for tg3_load_firmware_cpu():
 * per-segment load address and byte length.  The companion *_data
 * pointer fields referenced by the loader are also in missing lines.
 */
4373 unsigned int text_base;   /* load address of the text segment */
4374 unsigned int text_len;    /* text segment length in bytes */
4376 unsigned int rodata_base; /* load address of the rodata segment */
4377 unsigned int rodata_len;  /* rodata segment length in bytes */
4379 unsigned int data_base;   /* load address of the data segment */
4380 unsigned int data_len;    /* data segment length in bytes */
4384 /* tp->lock is held. */
/* Load one firmware image (described by @info) into the scratch memory
 * of the CPU at @cpu_base: halt the CPU (under the NVRAM lock, since
 * bootcode may still be running), zero the scratch area, then copy the
 * text, rodata and data segments with the chip-appropriate write
 * routine.  A NULL segment-data pointer means "write zeros".  The
 * PCIX_TARGET_HWBUG flag is forced on for the duration and the original
 * flags restored at the end.  NOTE(review): local declarations, error
 * returns, and the tail of each write_op() argument list fall in
 * missing lines of this listing.
 */
4385 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4386 int cpu_scratch_size, struct fw_info *info)
4389 u32 orig_tg3_flags = tp->tg3_flags;
4390 void (*write_op)(struct tg3 *, u32, u32);
/* 5705+ has no loadable TX CPU; reject such requests. */
4392 if (cpu_base == TX_CPU_BASE &&
4393 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4394 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4395 "TX cpu firmware on %s which is 5705.\n",
/* Pick the write routine the chip generation requires. */
4400 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4401 write_op = tg3_write_mem;
4403 write_op = tg3_write_indirect_reg32;
4405 /* Force use of PCI config space for indirect register
4408 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4410 /* It is possible that bootcode is still loading at this point.
4411 * Get the nvram lock first before halting the cpu.
4414 err = tg3_halt_cpu(tp, cpu_base);
4415 tg3_nvram_unlock(tp);
/* Clear the whole scratch window, keep the CPU halted, then copy
 * each segment word by word at (base & 0xffff) within the window.
 */
4419 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4420 write_op(tp, cpu_scratch_base + i, 0);
4421 tw32(cpu_base + CPU_STATE, 0xffffffff);
4422 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4423 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4424 write_op(tp, (cpu_scratch_base +
4425 (info->text_base & 0xffff) +
4428 info->text_data[i] : 0));
4429 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4430 write_op(tp, (cpu_scratch_base +
4431 (info->rodata_base & 0xffff) +
4433 (info->rodata_data ?
4434 info->rodata_data[i] : 0));
4435 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4436 write_op(tp, (cpu_scratch_base +
4437 (info->data_base & 0xffff) +
4440 info->data_data[i] : 0));
/* Restore the flags clobbered by the HWBUG workaround above. */
4445 tp->tg3_flags = orig_tg3_flags;
4449 /* tp->lock is held. */
/* Load the built-in replacement firmware (tg3FwText/tg3FwRodata; data
 * segment is all zeros, so data_data is NULL) into both the RX and TX
 * CPU scratch areas, then start only the RX CPU: set its PC to
 * TG3_FW_TEXT_ADDR, retry up to 5 times until the PC reads back
 * correctly, and finally clear CPU_MODE to let it run.  Logs an error
 * if the PC never sticks.  NOTE(review): error-return lines and loop
 * braces fall in missing lines of this listing.
 */
4450 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4452 struct fw_info info;
/* Describe the built-in image for the generic loader. */
4455 info.text_base = TG3_FW_TEXT_ADDR;
4456 info.text_len = TG3_FW_TEXT_LEN;
4457 info.text_data = &tg3FwText[0];
4458 info.rodata_base = TG3_FW_RODATA_ADDR;
4459 info.rodata_len = TG3_FW_RODATA_LEN;
4460 info.rodata_data = &tg3FwRodata[0];
4461 info.data_base = TG3_FW_DATA_ADDR;
4462 info.data_len = TG3_FW_DATA_LEN;
4463 info.data_data = NULL;
4465 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4466 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4471 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4472 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4477 /* Now startup only the RX cpu. */
4478 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4479 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4481 for (i = 0; i < 5; i++) {
4482 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
/* PC didn't take: re-halt and rewrite it before retrying. */
4484 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4485 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4486 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4490 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4491 "to set RX CPU PC, is %08x should be %08x\n",
4492 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Release the halt: clearing CPU_MODE starts execution. */
4496 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4497 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
/* Memory layout of the built-in TSO firmware image (compiled in only
 * when TSO support is enabled): version, load address, and per-segment
 * base/length.  NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a
 * typo for "RELEASE"; kept as-is because other code may reference it.
 */
4502 #if TG3_TSO_SUPPORT != 0
4504 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4505 #define TG3_TSO_FW_RELASE_MINOR 0x6
4506 #define TG3_TSO_FW_RELEASE_FIX 0x0
4507 #define TG3_TSO_FW_START_ADDR 0x08000000
4508 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4509 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4510 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4511 #define TG3_TSO_FW_RODATA_LEN 0x60
4512 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4513 #define TG3_TSO_FW_DATA_LEN 0x30
4514 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4515 #define TG3_TSO_FW_SBSS_LEN 0x2c
4516 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4517 #define TG3_TSO_FW_BSS_LEN 0x894
4519 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4520 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4521 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4522 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4523 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4524 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4525 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4526 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4527 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4528 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4529 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4530 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4531 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4532 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4533 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4534 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4535 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4536 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4537 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4538 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4539 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4540 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4541 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4542 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4543 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4544 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4545 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4546 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4547 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4548 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4549 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4550 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4551 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4552 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4553 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4554 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4555 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4556 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4557 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4558 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4559 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4560 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4561 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4562 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4563 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4564 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4565 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4566 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4567 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4568 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4569 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4570 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4571 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4572 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4573 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4574 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4575 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4576 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4577 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4578 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4579 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4580 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4581 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4582 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4583 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4584 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4585 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4586 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4587 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4588 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4589 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4590 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4591 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4592 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4593 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4594 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4595 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4596 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4597 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4598 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4599 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4600 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4601 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4602 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4603 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4604 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4605 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4606 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4607 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4608 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4609 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4610 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4611 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4612 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4613 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4614 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4615 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4616 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4617 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4618 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4619 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4620 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4621 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4622 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4623 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4624 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4625 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4626 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4627 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4628 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4629 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4630 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4631 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4632 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4633 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4634 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4635 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4636 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4637 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4638 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4639 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4640 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4641 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4642 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4643 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4644 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4645 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4646 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4647 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4648 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4649 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4650 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4651 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4652 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4653 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4654 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4655 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4656 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4657 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4658 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4659 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4660 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4661 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4662 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4663 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4664 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4665 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4666 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4667 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4668 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4669 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4670 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4671 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4672 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4673 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4674 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4675 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4676 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4677 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4678 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4679 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4680 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4681 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4682 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4683 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4684 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4685 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4686 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4687 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4688 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4689 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4690 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4691 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4692 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4693 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4694 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4695 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4696 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4697 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4698 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4699 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4700 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4701 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4702 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4703 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4704 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4705 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4706 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4707 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4708 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4709 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4710 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4711 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4712 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4713 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4714 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4715 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4716 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4717 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4718 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4719 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4720 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4721 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4722 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4723 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4724 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4725 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4726 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4727 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4728 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4729 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4730 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4731 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4732 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4733 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4734 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4735 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4736 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4737 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4738 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4739 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4740 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4741 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4742 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4743 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4744 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4745 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4746 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4747 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4748 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4749 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4750 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4751 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4752 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4753 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4754 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4755 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4756 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4757 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4758 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4759 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4760 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4761 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4762 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4763 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4764 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4765 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4766 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4767 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4768 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4769 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4770 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4771 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4772 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4773 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4774 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4775 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4776 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4777 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4778 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4779 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4780 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4781 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4782 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4783 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4784 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4785 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4786 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4787 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4788 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4789 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4790 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4791 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4792 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4793 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4794 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4795 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4796 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4797 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4798 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4799 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4800 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4801 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4802 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4803 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
/* Read-only data segment of the standard TSO firmware image (loaded at
 * TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware).  The hex words decode
 * as ASCII tag strings: "MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**",
 * "SwEvent0", "fatalErr".
 */
4806 static u32 tg3TsoFwRodata[] = {
4807 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4808 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4809 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4810 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
/* Initialized data segment of the standard TSO firmware image (loaded at
 * TG3_TSO_FW_DATA_ADDR).  Contains the firmware version string
 * "stkoffld_v1.6.0" encoded as big-endian ASCII words.
 */
4814 static u32 tg3TsoFwData[] = {
4815 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4816 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4820 /* 5705 needs a special version of the TSO firmware. */
/* Layout constants for the 5705-specific TSO firmware image.
 * The segments are packed into NIC SRAM starting at the text address:
 * text (0xe90 bytes) is followed immediately by rodata, then data,
 * sbss and bss at aligned offsets.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELEASE_MINOR	0x2
/* Historical misspelling of the minor-release macro; kept as an alias so
 * any existing references to the old name continue to build.
 */
#define TG3_TSO5_FW_RELASE_MINOR	TG3_TSO5_FW_RELEASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
4836 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4837 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4838 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4839 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4840 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4841 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4842 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4843 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4844 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4845 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4846 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4847 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4848 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4849 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4850 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4851 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4852 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4853 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4854 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4855 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4856 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4857 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4858 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4859 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4860 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4861 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4862 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4863 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4864 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4865 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4866 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4867 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4868 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4869 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4870 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4871 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4872 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4873 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4874 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4875 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4876 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4877 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4878 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4879 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4880 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4881 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4882 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4883 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4884 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4885 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4886 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4887 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4888 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4889 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4890 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4891 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4892 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4893 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4894 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4895 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4896 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4897 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4898 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4899 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4900 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4901 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4902 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4903 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4904 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4905 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4906 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4907 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4908 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4909 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4910 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4911 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4912 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4913 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4914 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4915 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4916 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4917 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4918 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4919 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4920 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4921 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4922 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4923 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4924 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4925 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4926 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4927 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4928 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4929 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4930 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4931 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4932 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4933 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4934 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4935 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4936 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4937 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4938 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4939 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4940 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4941 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4942 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4943 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4944 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4945 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4946 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4947 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4948 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4949 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4950 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4951 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4952 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4953 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4954 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4955 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4956 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4957 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4958 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4959 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4960 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4961 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4962 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4963 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4964 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4965 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4966 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4967 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4968 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4969 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4970 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4971 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4972 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4973 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4974 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4975 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4976 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4977 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4978 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4979 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4980 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4981 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4982 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4983 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4984 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4985 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4986 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4987 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4988 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4989 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4990 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4991 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4992 0x00000000, 0x00000000, 0x00000000,
/* Read-only data segment of the 5705-specific TSO firmware image (loaded
 * at TG3_TSO5_FW_RODATA_ADDR).  The hex words decode as ASCII tag strings:
 * "MainCpuB", "MainCpuA", "stkoffld", "stkoffld", "fatalErr".
 */
4995 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4996 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4997 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4998 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4999 0x00000000, 0x00000000, 0x00000000,
/* Initialized data segment of the 5705-specific TSO firmware image (loaded
 * at TG3_TSO5_FW_DATA_ADDR).  Contains the firmware version string
 * "stkoffld_v1.2.0" encoded as big-endian ASCII words.
 */
5002 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5003 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5004 0x00000000, 0x00000000, 0x00000000,
5007 /* tp->lock is held. */
/* Download the TSO (TCP segmentation offload) firmware into one of the
 * NIC's on-chip CPUs and start it executing.  Chips that do TSO in
 * hardware (TG3_FLG2_HW_TSO) bail out early and load nothing.
 * tp->lock must be held by the caller.
 * Returns 0 on success; the error-path lines are elided in this excerpt,
 * so the exact failure codes cannot be confirmed here — NOTE(review).
 */
5008 static int tg3_load_tso_firmware(struct tg3 *tp)
5010 struct fw_info info;
5011 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5014 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
/* The 5705 uses its own firmware image, loaded via the RX CPU and staged
 * in the mbuf-pool SRAM; the scratch area must cover every segment of
 * the image (text/rodata/data/sbss/bss).
 */
5017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5018 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5019 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5020 info.text_data = &tg3Tso5FwText[0];
5021 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5022 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5023 info.rodata_data = &tg3Tso5FwRodata[0];
5024 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5025 info.data_len = TG3_TSO5_FW_DATA_LEN;
5026 info.data_data = &tg3Tso5FwData[0];
5027 cpu_base = RX_CPU_BASE;
5028 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5029 cpu_scratch_size = (info.text_len +
5032 TG3_TSO5_FW_SBSS_LEN +
5033 TG3_TSO5_FW_BSS_LEN);
/* All other TSO-capable chips load the standard image into the TX CPU's
 * dedicated scratch memory.
 */
5035 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5036 info.text_len = TG3_TSO_FW_TEXT_LEN;
5037 info.text_data = &tg3TsoFwText[0];
5038 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5039 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5040 info.rodata_data = &tg3TsoFwRodata[0];
5041 info.data_base = TG3_TSO_FW_DATA_ADDR;
5042 info.data_len = TG3_TSO_FW_DATA_LEN;
5043 info.data_data = &tg3TsoFwData[0];
5044 cpu_base = TX_CPU_BASE;
5045 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5046 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
/* Hand the assembled image description to the generic CPU loader. */
5049 err = tg3_load_firmware_cpu(tp, cpu_base,
5050 cpu_scratch_base, cpu_scratch_size,
5055 /* Now startup the cpu. */
5056 tw32(cpu_base + CPU_STATE, 0xffffffff);
5057 tw32_f(cpu_base + CPU_PC, info.text_base);
/* The PC write does not always take on the first attempt; retry up to
 * five times, halting the CPU and rewriting the PC between attempts.
 */
5059 for (i = 0; i < 5; i++) {
5060 if (tr32(cpu_base + CPU_PC) == info.text_base)
5062 tw32(cpu_base + CPU_STATE, 0xffffffff);
5063 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5064 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Failure path: the CPU never latched the firmware entry point. */
5068 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5069 "to set CPU PC, is %08x should be %08x\n",
5070 tp->dev->name, tr32(cpu_base + CPU_PC),
/* Clear CPU_MODE (dropping CPU_MODE_HALT) so the firmware starts running. */
5074 tw32(cpu_base + CPU_STATE, 0xffffffff);
5075 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5079 #endif /* TG3_TSO_SUPPORT != 0 */
5081 /* tp->lock is held. */
/* Program the current dev->dev_addr into the MAC's address registers and
 * reseed the transmit backoff generator from it.  tp->lock must be held.
 */
5082 static void __tg3_set_mac_addr(struct tg3 *tp)
5084 u32 addr_high, addr_low;
/* Split the 6-byte station address into the hardware's register halves:
 * the top two bytes go in the HIGH register, the low four in LOW.
 */
5087 addr_high = ((tp->dev->dev_addr[0] << 8) |
5088 tp->dev->dev_addr[1]);
5089 addr_low = ((tp->dev->dev_addr[2] << 24) |
5090 (tp->dev->dev_addr[3] << 16) |
5091 (tp->dev->dev_addr[4] << 8) |
5092 (tp->dev->dev_addr[5] << 0));
/* Write the same address into all four MAC address slots. */
5093 for (i = 0; i < 4; i++) {
5094 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5095 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 additionally expose twelve extended address slots; fill them
 * with the same address too.
 */
5098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5100 for (i = 0; i < 12; i++) {
5101 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5102 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the TX backoff generator with the byte-sum of the MAC address,
 * masked to the register's valid range.
 */
5106 addr_high = (tp->dev->dev_addr[0] +
5107 tp->dev->dev_addr[1] +
5108 tp->dev->dev_addr[2] +
5109 tp->dev->dev_addr[3] +
5110 tp->dev->dev_addr[4] +
5111 tp->dev->dev_addr[5]) &
5112 TX_BACKOFF_SEED_MASK;
5113 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* set_mac_address entry point: accept a new station address from the
 * stack (p is a struct sockaddr), copy it into dev->dev_addr, and
 * reprogram the MAC registers under tp->lock.
 * NOTE(review): no is_valid_ether_addr() check on addr->sa_data — a
 * multicast or all-zero address is accepted silently; later kernels
 * reject such addresses here.  Consider adding the check.
 */
5116 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5118 struct tg3 *tp = netdev_priv(dev);
5119 struct sockaddr *addr = p;
5121 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* Serialize against the rest of the driver while touching hardware. */
5123 spin_lock_bh(&tp->lock);
5124 __tg3_set_mac_addr(tp);
5125 spin_unlock_bh(&tp->lock);
5130 /* tp->lock is held. */
/* Write one TG3_BDINFO ring-control block into NIC memory at bdinfo_addr:
 * the 64-bit host DMA address of the ring (high word, then low word), the
 * maxlen/flags word, and — only on pre-5705 chips — the ring's location
 * in NIC SRAM.  tp->lock must be held.
 * NOTE(review): the tg3_write_mem() call heads are elided in this excerpt;
 * the parenthesized offsets below are the arguments to those calls.
 */
5131 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5132 dma_addr_t mapping, u32 maxlen_flags,
5136 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5137 ((u64) mapping >> 32));
5139 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5140 ((u64) mapping & 0xffffffff));
5142 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705-and-newer chips have no NIC-side ring address to program. */
5145 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5147 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5151 static void __tg3_set_rx_mode(struct net_device *);
/* Push ethtool interrupt-coalescing parameters into the host coalescing
 * engine.  5705-and-newer chips lack the separate "during IRQ" tick
 * registers and the statistics-block tick register, so those writes are
 * skipped for them.  tp->lock must be held.
 */
5152 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5154 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5155 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5156 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5157 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
/* The per-interrupt tick registers exist only on pre-5705 silicon. */
5158 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5159 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5160 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5162 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5163 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5164 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5165 u32 val = ec->stats_block_coalesce_usecs;
/* NOTE(review): the statement zeroing 'val' while the carrier is down is
 * elided in this excerpt — presumably statistics ticks are disabled when
 * the link is down; confirm against the full source.
 */
5167 if (!netif_carrier_ok(tp->dev))
5170 tw32(HOSTCC_STAT_COAL_TICKS, val);
5174 /* tp->lock is held. */
5175 static int tg3_reset_hw(struct tg3 *tp)
5177 u32 val, rdmac_mode;
5180 tg3_disable_ints(tp);
5184 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5186 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5187 tg3_abort_hw(tp, 1);
5190 err = tg3_chip_reset(tp);
5194 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5196 /* This works around an issue with Athlon chipsets on
5197 * B3 tigon3 silicon. This bit has no effect on any
5198 * other revision. But do not set this on PCI Express
5201 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5202 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5203 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5205 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5206 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5207 val = tr32(TG3PCI_PCISTATE);
5208 val |= PCISTATE_RETRY_SAME_DMA;
5209 tw32(TG3PCI_PCISTATE, val);
5212 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5213 /* Enable some hw fixes. */
5214 val = tr32(TG3PCI_MSI_DATA);
5215 val |= (1 << 26) | (1 << 28) | (1 << 29);
5216 tw32(TG3PCI_MSI_DATA, val);
5219 /* Descriptor ring init may make accesses to the
5220 * NIC SRAM area to setup the TX descriptors, so we
5221 * can only do this after the hardware has been
5222 * successfully reset.
5226 /* This value is determined during the probe time DMA
5227 * engine test, tg3_test_dma.
5229 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5231 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5232 GRC_MODE_4X_NIC_SEND_RINGS |
5233 GRC_MODE_NO_TX_PHDR_CSUM |
5234 GRC_MODE_NO_RX_PHDR_CSUM);
5235 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5236 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5237 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5238 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5239 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5243 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5245 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5246 val = tr32(GRC_MISC_CFG);
5248 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5249 tw32(GRC_MISC_CFG, val);
5251 /* Initialize MBUF/DESC pool. */
5252 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5254 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5255 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5257 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5259 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5260 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5261 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5263 #if TG3_TSO_SUPPORT != 0
5264 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5267 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5268 TG3_TSO5_FW_RODATA_LEN +
5269 TG3_TSO5_FW_DATA_LEN +
5270 TG3_TSO5_FW_SBSS_LEN +
5271 TG3_TSO5_FW_BSS_LEN);
5272 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5273 tw32(BUFMGR_MB_POOL_ADDR,
5274 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5275 tw32(BUFMGR_MB_POOL_SIZE,
5276 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5280 if (tp->dev->mtu <= ETH_DATA_LEN) {
5281 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5282 tp->bufmgr_config.mbuf_read_dma_low_water);
5283 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5284 tp->bufmgr_config.mbuf_mac_rx_low_water);
5285 tw32(BUFMGR_MB_HIGH_WATER,
5286 tp->bufmgr_config.mbuf_high_water);
5288 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5289 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5290 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5291 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5292 tw32(BUFMGR_MB_HIGH_WATER,
5293 tp->bufmgr_config.mbuf_high_water_jumbo);
5295 tw32(BUFMGR_DMA_LOW_WATER,
5296 tp->bufmgr_config.dma_low_water);
5297 tw32(BUFMGR_DMA_HIGH_WATER,
5298 tp->bufmgr_config.dma_high_water);
5300 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5301 for (i = 0; i < 2000; i++) {
5302 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5307 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5312 /* Setup replenish threshold. */
5313 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5315 /* Initialize TG3_BDINFO's at:
5316 * RCVDBDI_STD_BD: standard eth size rx ring
5317 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5318 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5321 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5322 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5323 * ring attribute flags
5324 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5326 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5327 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5329 * The size of each ring is fixed in the firmware, but the location is
5332 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5333 ((u64) tp->rx_std_mapping >> 32));
5334 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5335 ((u64) tp->rx_std_mapping & 0xffffffff));
5336 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5337 NIC_SRAM_RX_BUFFER_DESC);
5339 /* Don't even try to program the JUMBO/MINI buffer descriptor
5342 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5343 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5344 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5346 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5347 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5349 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5350 BDINFO_FLAGS_DISABLED);
5352 /* Setup replenish threshold. */
5353 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5355 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5356 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5357 ((u64) tp->rx_jumbo_mapping >> 32));
5358 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5359 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5360 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5361 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5362 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5363 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5365 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5366 BDINFO_FLAGS_DISABLED);
5371 /* There is only one send ring on 5705/5750, no need to explicitly
5372 * disable the others.
5374 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5375 /* Clear out send RCB ring in SRAM. */
5376 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5377 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5378 BDINFO_FLAGS_DISABLED);
5383 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5384 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5386 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5387 tp->tx_desc_mapping,
5388 (TG3_TX_RING_SIZE <<
5389 BDINFO_FLAGS_MAXLEN_SHIFT),
5390 NIC_SRAM_TX_BUFFER_DESC);
5392 /* There is only one receive return ring on 5705/5750, no need
5393 * to explicitly disable the others.
5395 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5396 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5397 i += TG3_BDINFO_SIZE) {
5398 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5399 BDINFO_FLAGS_DISABLED);
5404 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5406 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5408 (TG3_RX_RCB_RING_SIZE(tp) <<
5409 BDINFO_FLAGS_MAXLEN_SHIFT),
5412 tp->rx_std_ptr = tp->rx_pending;
5413 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5416 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5417 tp->rx_jumbo_pending : 0;
5418 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5421 /* Initialize MAC address and backoff seed. */
5422 __tg3_set_mac_addr(tp);
5424 /* MTU + ethernet header + FCS + optional VLAN tag */
5425 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5427 /* The slot time is changed by tg3_setup_phy if we
5428 * run at gigabit with half duplex.
5430 tw32(MAC_TX_LENGTHS,
5431 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5432 (6 << TX_LENGTHS_IPG_SHIFT) |
5433 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5435 /* Receive rules. */
5436 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5437 tw32(RCVLPC_CONFIG, 0x0181);
5439 /* Calculate RDMAC_MODE setting early, we need it to determine
5440 * the RCVLPC_STATE_ENABLE mask.
5442 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5443 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5444 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5445 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5446 RDMAC_MODE_LNGREAD_ENAB);
5447 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5448 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5450 /* If statement applies to 5705 and 5750 PCI devices only */
5451 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5452 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5453 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5454 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5455 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5456 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5457 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5458 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5459 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5460 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5464 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5465 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5467 #if TG3_TSO_SUPPORT != 0
5468 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5469 rdmac_mode |= (1 << 27);
5472 /* Receive/send statistics. */
5473 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5474 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5475 val = tr32(RCVLPC_STATS_ENABLE);
5476 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5477 tw32(RCVLPC_STATS_ENABLE, val);
5479 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5481 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5482 tw32(SNDDATAI_STATSENAB, 0xffffff);
5483 tw32(SNDDATAI_STATSCTRL,
5484 (SNDDATAI_SCTRL_ENABLE |
5485 SNDDATAI_SCTRL_FASTUPD));
5487 /* Setup host coalescing engine. */
5488 tw32(HOSTCC_MODE, 0);
5489 for (i = 0; i < 2000; i++) {
5490 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5495 __tg3_set_coalesce(tp, &tp->coal);
5497 /* set status block DMA address */
5498 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5499 ((u64) tp->status_mapping >> 32));
5500 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5501 ((u64) tp->status_mapping & 0xffffffff));
5503 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5504 /* Status/statistics block address. See tg3_timer,
5505 * the tg3_periodic_fetch_stats call there, and
5506 * tg3_get_stats to see how this works for 5705/5750 chips.
5508 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5509 ((u64) tp->stats_mapping >> 32));
5510 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5511 ((u64) tp->stats_mapping & 0xffffffff));
5512 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5513 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5516 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5518 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5519 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5520 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5521 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5523 /* Clear statistics/status block in chip, and status block in ram. */
5524 for (i = NIC_SRAM_STATS_BLK;
5525 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5527 tg3_write_mem(tp, i, 0);
5530 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5532 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5533 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5534 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5537 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5538 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5539 * register to preserve the GPIO settings for LOMs. The GPIOs,
5540 * whether used as inputs or outputs, are set by boot code after
5543 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5546 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5547 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5550 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5551 GRC_LCLCTRL_GPIO_OUTPUT3;
5553 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5555 /* GPIO1 must be driven high for eeprom write protect */
5556 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5557 GRC_LCLCTRL_GPIO_OUTPUT1);
5559 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5562 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5563 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5566 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5567 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5571 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5572 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5573 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5574 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5575 WDMAC_MODE_LNGREAD_ENAB);
5577 /* If statement applies to 5705 and 5750 PCI devices only */
5578 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5579 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5581 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5582 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5583 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5585 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5586 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5587 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5588 val |= WDMAC_MODE_RX_ACCEL;
5592 tw32_f(WDMAC_MODE, val);
5595 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5596 val = tr32(TG3PCI_X_CAPS);
5597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5598 val &= ~PCIX_CAPS_BURST_MASK;
5599 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5600 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5601 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5602 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5603 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5604 val |= (tp->split_mode_max_reqs <<
5605 PCIX_CAPS_SPLIT_SHIFT);
5607 tw32(TG3PCI_X_CAPS, val);
5610 tw32_f(RDMAC_MODE, rdmac_mode);
5613 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5614 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5615 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5616 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5617 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5618 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5619 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5620 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5621 #if TG3_TSO_SUPPORT != 0
5622 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5623 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5625 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5626 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5628 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5629 err = tg3_load_5701_a0_firmware_fix(tp);
5634 #if TG3_TSO_SUPPORT != 0
5635 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5636 err = tg3_load_tso_firmware(tp);
5642 tp->tx_mode = TX_MODE_ENABLE;
5643 tw32_f(MAC_TX_MODE, tp->tx_mode);
5646 tp->rx_mode = RX_MODE_ENABLE;
5647 tw32_f(MAC_RX_MODE, tp->rx_mode);
5650 if (tp->link_config.phy_is_low_power) {
5651 tp->link_config.phy_is_low_power = 0;
5652 tp->link_config.speed = tp->link_config.orig_speed;
5653 tp->link_config.duplex = tp->link_config.orig_duplex;
5654 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5657 tp->mi_mode = MAC_MI_MODE_BASE;
5658 tw32_f(MAC_MI_MODE, tp->mi_mode);
5661 tw32(MAC_LED_CTRL, tp->led_ctrl);
5663 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5664 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5665 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5668 tw32_f(MAC_RX_MODE, tp->rx_mode);
5671 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5672 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5673 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5674 /* Set drive transmission level to 1.2V */
5675 /* only if the signal pre-emphasis bit is not set */
5676 val = tr32(MAC_SERDES_CFG);
5679 tw32(MAC_SERDES_CFG, val);
5681 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5682 tw32(MAC_SERDES_CFG, 0x616000);
5685 /* Prevent chip from dropping frames when flow control
5688 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5691 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5692 /* Use hardware link auto-negotiation */
5693 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5696 err = tg3_setup_phy(tp, 1);
5700 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5703 /* Clear CRC stats. */
5704 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5705 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5706 tg3_readphy(tp, 0x14, &tmp);
5710 __tg3_set_rx_mode(tp->dev);
5712 /* Initialize receive rules. */
5713 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5714 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5715 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5716 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5718 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5719 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
5723 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5727 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5729 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5731 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5733 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5735 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5737 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5739 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5741 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5743 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5745 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5747 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5749 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5751 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5753 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5761 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5766 /* Called at device open time to get the chip ready for
5767 * packet processing. Invoked with tp->lock held.
/* NOTE(review): this extract is elided — error-check/return lines between
 * the statements below are missing from this view.
 */
5769 static int tg3_init_hw(struct tg3 *tp)
5773 /* Force the chip into D0. */
5774 err = tg3_set_power_state(tp, 0);
/* Select the proper core clocks before touching chip registers. */
5778 tg3_switch_clocks(tp);
/* Reset the indirect memory window base prior to full hw init. */
5780 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5782 err = tg3_reset_hw(tp);
/* Read 32-bit stat register REG and accumulate it into the 64-bit
 * software counter PSTAT (low/high pair), carrying into ->high when
 * ->low wraps. Multi-statement macro wrapped in do { ... } while (0)
 * (closing line elided in this extract).
 */
5788 #define TG3_STAT_ADD32(PSTAT, REG) \
5789 do { u32 __val = tr32(REG); \
5790 (PSTAT)->low += __val; \
5791 if ((PSTAT)->low < __val) \
5792 (PSTAT)->high += 1; \
/* Fold the chip's 32-bit MAC TX/RX statistics registers into the 64-bit
 * counters in tp->hw_stats via TG3_STAT_ADD32. Skipped entirely while
 * the carrier is down (early return elided in this extract).
 */
5795 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5797 struct tg3_hw_stats *sp = tp->hw_stats;
5799 if (!netif_carrier_ok(tp->dev))
/* Transmit-side MAC statistics. */
5802 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5803 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5804 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5805 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5806 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5807 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5808 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5809 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5810 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5811 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5812 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5813 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5814 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side MAC statistics. */
5816 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5817 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5818 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5819 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5820 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5821 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5822 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5823 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5824 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5825 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5826 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5827 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5828 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5829 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Periodic driver timer (runs every tp->timer_offset jiffies and re-arms
 * itself at the end). Work done here:
 *  - non-tagged-status chips: work around the race-prone mailbox/status
 *    protocol by forcing an interrupt / coalescing event when the status
 *    block shows an update;
 *  - detect a dead chip (WDMAC disabled) and schedule the reset task;
 *  - once per second (timer_counter): fetch stats on 5705+ and poll the
 *    link via MAC_STATUS or serdes state, calling tg3_setup_phy on change;
 *  - every asf_counter ticks: send the ASF firmware heartbeat through the
 *    NIC_SRAM_FW_CMD mailboxes.
 * NOTE(review): several lines (udelay, closing braces, else arms) are
 * elided in this extract.
 */
5832 static void tg3_timer(unsigned long __opaque)
5834 struct tg3 *tp = (struct tg3 *) __opaque;
5836 spin_lock(&tp->lock);
5838 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
5839 /* All of this garbage is because when using non-tagged
5840 * IRQ status the mailbox/status_block protocol the chip
5841 * uses with the cpu is race prone.
5843 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5844 tw32(GRC_LOCAL_CTRL,
5845 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5847 tw32(HOSTCC_MODE, tp->coalesce_mode |
5848 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* WDMAC disabled means the chip has wedged: hand off to reset_task. */
5851 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5852 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5853 spin_unlock(&tp->lock);
5854 schedule_work(&tp->reset_task);
5859 /* This part only runs once per second. */
5860 if (!--tp->timer_counter) {
5861 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5862 tg3_periodic_fetch_stats(tp);
5864 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5868 mac_stat = tr32(MAC_STATUS);
5871 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5872 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5874 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5878 tg3_setup_phy(tp, 0);
5879 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5880 u32 mac_stat = tr32(MAC_STATUS);
5883 if (netif_carrier_ok(tp->dev) &&
5884 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5887 if (! netif_carrier_ok(tp->dev) &&
5888 (mac_stat & (MAC_STATUS_PCS_SYNCED |
5889 MAC_STATUS_SIGNAL_DET))) {
5895 ~MAC_MODE_PORT_MODE_MASK));
5897 tw32_f(MAC_MODE, tp->mac_mode);
5899 tg3_setup_phy(tp, 0);
5903 tp->timer_counter = tp->timer_multiplier;
5906 /* Heartbeat is only sent once every 120 seconds. */
5907 if (!--tp->asf_counter) {
5908 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5911 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5912 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5913 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5914 val = tr32(GRC_RX_CPU_EVENT);
5916 tw32(GRC_RX_CPU_EVENT, val);
5918 tp->asf_counter = tp->asf_multiplier;
/* Re-arm ourselves for the next tick. */
5921 spin_unlock(&tp->lock);
5923 tp->timer.expires = jiffies + tp->timer_offset;
5924 add_timer(&tp->timer);
/* Verify that the device can actually raise an interrupt: temporarily
 * install tg3_test_isr, force a host-coalescing "now" event, and poll
 * the interrupt mailbox for a response, then restore the normal handler
 * (tg3_msi, or tg3_interrupt / tg3_interrupt_tagged for INTx).
 * NOTE(review): poll-loop body, success/error returns and closing braces
 * are elided in this extract.
 */
5927 static int tg3_test_interrupt(struct tg3 *tp)
5929 struct net_device *dev = tp->dev;
5933 if (!netif_running(dev))
5936 tg3_disable_ints(tp);
/* Swap the real ISR for the test ISR. */
5938 free_irq(tp->pdev->irq, dev);
5940 err = request_irq(tp->pdev->irq, tg3_test_isr,
5941 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5945 tg3_enable_ints(tp);
/* Force the coalescing engine to fire an interrupt immediately. */
5947 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5950 for (i = 0; i < 5; i++) {
5951 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5957 tg3_disable_ints(tp);
/* Restore the production interrupt handler. */
5959 free_irq(tp->pdev->irq, dev);
5961 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5962 err = request_irq(tp->pdev->irq, tg3_msi,
5963 SA_SAMPLE_RANDOM, dev->name, dev);
5965 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
5966 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5967 fn = tg3_interrupt_tagged;
5968 err = request_irq(tp->pdev->irq, fn,
5969 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5981 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
5982 * successfully restored
/* NOTE(review): success/early-return paths between the statements below
 * are elided in this extract.
 */
5984 static int tg3_test_msi(struct tg3 *tp)
5986 struct net_device *dev = tp->dev;
5990 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5993 /* Turn off SERR reporting in case MSI terminates with Master
5996 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5997 pci_write_config_word(tp->pdev, PCI_COMMAND,
5998 pci_cmd & ~PCI_COMMAND_SERR);
6000 err = tg3_test_interrupt(tp);
/* Restore the saved PCI command word after the test. */
6002 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6007 /* other failures */
6011 /* MSI test failed, go back to INTx mode */
6012 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6013 "switching to INTx mode. Please report this failure to "
6014 "the PCI maintainer and include system chipset information.\n",
6017 free_irq(tp->pdev->irq, dev);
6018 pci_disable_msi(tp->pdev);
6020 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Re-register the INTx handler (tagged variant where supported). */
6023 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6024 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6025 fn = tg3_interrupt_tagged;
6027 err = request_irq(tp->pdev->irq, fn,
6028 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6033 /* Need to reset the chip because the MSI cycle may have terminated
6034 * with Master Abort.
6036 tg3_full_lock(tp, 1);
6038 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6039 err = tg3_init_hw(tp);
6041 tg3_full_unlock(tp);
6044 free_irq(tp->pdev->irq, dev);
/* net_device open entry point. Sequence: disable interrupts and clear
 * INIT_COMPLETE; allocate DMA-consistent rings; optionally enable MSI on
 * 5750+ (non-AX/BX revs) when tagged status is supported; request the
 * IRQ (MSI or shared INTx); initialize the hardware; set up and later
 * arm the periodic tg3_timer; verify MSI delivery with tg3_test_msi;
 * finally enable interrupts and start the TX queue.
 * NOTE(review): error-branch lines and closing braces are elided in this
 * extract — the unwind paths (free_irq / pci_disable_msi /
 * tg3_free_consistent) appear below without their surrounding labels.
 */
6049 static int tg3_open(struct net_device *dev)
6051 struct tg3 *tp = netdev_priv(dev);
6054 tg3_full_lock(tp, 0);
6056 tg3_disable_ints(tp);
6057 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6059 tg3_full_unlock(tp);
6061 /* The placement of this call is tied
6062 * to the setup and use of Host TX descriptors.
6064 err = tg3_alloc_consistent(tp);
/* MSI is only attempted on 5750-plus, excluding the AX/BX revisions. */
6068 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6069 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6070 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6071 /* All MSI supporting chips should support tagged
6072 * status. Assert that this is the case.
6074 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6075 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6076 "Not using MSI.\n", tp->dev->name);
6077 } else if (pci_enable_msi(tp->pdev) == 0) {
6080 msi_mode = tr32(MSGINT_MODE);
6081 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6082 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
/* Request the interrupt line: MSI handler, or shared INTx handler
 * (tagged variant when the chip supports tagged status). */
6085 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6086 err = request_irq(tp->pdev->irq, tg3_msi,
6087 SA_SAMPLE_RANDOM, dev->name, dev);
6089 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6090 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6091 fn = tg3_interrupt_tagged;
6093 err = request_irq(tp->pdev->irq, fn,
6094 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6098 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6099 pci_disable_msi(tp->pdev);
6100 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6102 tg3_free_consistent(tp);
6106 tg3_full_lock(tp, 0);
6108 err = tg3_init_hw(tp);
6110 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Timer period: 1s with tagged status, 100ms otherwise; ASF heartbeat
 * fires every 120 seconds' worth of ticks. */
6113 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6114 tp->timer_offset = HZ;
6116 tp->timer_offset = HZ / 10;
6118 BUG_ON(tp->timer_offset > HZ);
6119 tp->timer_counter = tp->timer_multiplier =
6120 (HZ / tp->timer_offset);
6121 tp->asf_counter = tp->asf_multiplier =
6122 ((HZ / tp->timer_offset) * 120);
6124 init_timer(&tp->timer);
6125 tp->timer.expires = jiffies + tp->timer_offset;
6126 tp->timer.data = (unsigned long) tp;
6127 tp->timer.function = tg3_timer;
6130 tg3_full_unlock(tp);
6133 free_irq(tp->pdev->irq, dev);
6134 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6135 pci_disable_msi(tp->pdev);
6136 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6138 tg3_free_consistent(tp);
/* Confirm MSI actually delivers; fall back to INTx on failure. */
6142 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6143 err = tg3_test_msi(tp);
6146 tg3_full_lock(tp, 0);
6148 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6149 pci_disable_msi(tp->pdev);
6150 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6152 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6154 tg3_free_consistent(tp);
6156 tg3_full_unlock(tp);
/* Success path: arm the timer, mark init complete, enable interrupts,
 * and open the transmit queue. */
6162 tg3_full_lock(tp, 0);
6164 add_timer(&tp->timer);
6165 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6166 tg3_enable_ints(tp);
6168 tg3_full_unlock(tp);
6170 netif_start_queue(dev);
/* Debug-only register dump: prints PCI status, every MAC/DMA/coalescing
 * control-block mode+status register, selected SRAM control blocks, the
 * host status and statistics blocks, mailbox producer indices, and the
 * first few NIC-side TX/RX(std/jumbo) descriptors via the SRAM window.
 * Read-only with respect to chip state (only tr32/readl/config reads).
 */
6176 /*static*/ void tg3_dump_state(struct tg3 *tp)
6178 u32 val32, val32_2, val32_3, val32_4, val32_5;
6182 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6183 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6184 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6188 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6189 tr32(MAC_MODE), tr32(MAC_STATUS));
6190 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6191 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6192 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6193 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6194 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6195 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6197 /* Send data initiator control block */
6198 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6199 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6200 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6201 tr32(SNDDATAI_STATSCTRL));
6203 /* Send data completion control block */
6204 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6206 /* Send BD ring selector block */
6207 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6208 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6210 /* Send BD initiator control block */
6211 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6212 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6214 /* Send BD completion control block */
6215 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6217 /* Receive list placement control block */
6218 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6219 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6220 printk(" RCVLPC_STATSCTRL[%08x]\n",
6221 tr32(RCVLPC_STATSCTRL));
6223 /* Receive data and receive BD initiator control block */
6224 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6225 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6227 /* Receive data completion control block */
6228 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6231 /* Receive BD initiator control block */
6232 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6233 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6235 /* Receive BD completion control block */
6236 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6237 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6239 /* Receive list selector control block */
6240 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6241 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6243 /* Mbuf cluster free block */
6244 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6245 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6247 /* Host coalescing control block */
6248 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6249 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6250 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6251 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6252 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6253 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6254 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6255 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6256 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6257 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6258 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6259 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6261 /* Memory arbiter control block */
6262 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6263 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6265 /* Buffer manager control block */
6266 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6267 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6268 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6269 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6270 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6271 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6272 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6273 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6275 /* Read DMA control block */
6276 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6277 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6279 /* Write DMA control block */
6280 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6281 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6283 /* DMA completion block */
6284 printk("DEBUG: DMAC_MODE[%08x]\n",
6288 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6289 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6290 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6291 tr32(GRC_LOCAL_CTRL));
/* BD-info register blocks (host addr hi/lo, maxlen/flags, NIC addr). */
6294 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6295 tr32(RCVDBDI_JUMBO_BD + 0x0),
6296 tr32(RCVDBDI_JUMBO_BD + 0x4),
6297 tr32(RCVDBDI_JUMBO_BD + 0x8),
6298 tr32(RCVDBDI_JUMBO_BD + 0xc));
6299 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6300 tr32(RCVDBDI_STD_BD + 0x0),
6301 tr32(RCVDBDI_STD_BD + 0x4),
6302 tr32(RCVDBDI_STD_BD + 0x8),
6303 tr32(RCVDBDI_STD_BD + 0xc));
6304 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6305 tr32(RCVDBDI_MINI_BD + 0x0),
6306 tr32(RCVDBDI_MINI_BD + 0x4),
6307 tr32(RCVDBDI_MINI_BD + 0x8),
6308 tr32(RCVDBDI_MINI_BD + 0xc));
6310 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6311 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6312 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6313 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6314 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6315 val32, val32_2, val32_3, val32_4);
6317 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6318 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6319 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6320 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6321 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6322 val32, val32_2, val32_3, val32_4);
6324 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6325 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6326 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6327 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6328 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6329 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6330 val32, val32_2, val32_3, val32_4, val32_5);
6332 /* SW status block */
6333 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6334 tp->hw_status->status,
6335 tp->hw_status->status_tag,
6336 tp->hw_status->rx_jumbo_consumer,
6337 tp->hw_status->rx_consumer,
6338 tp->hw_status->rx_mini_consumer,
6339 tp->hw_status->idx[0].rx_producer,
6340 tp->hw_status->idx[0].tx_consumer);
6342 /* SW statistics block */
6343 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6344 ((u32 *)tp->hw_stats)[0],
6345 ((u32 *)tp->hw_stats)[1],
6346 ((u32 *)tp->hw_stats)[2],
6347 ((u32 *)tp->hw_stats)[3]);
6350 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6351 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6352 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6353 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6354 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6356 /* NIC side send descriptors. */
6357 for (i = 0; i < 6; i++) {
6360 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6361 + (i * sizeof(struct tg3_tx_buffer_desc));
6362 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6364 readl(txd + 0x0), readl(txd + 0x4),
6365 readl(txd + 0x8), readl(txd + 0xc));
6368 /* NIC side RX descriptors. */
6369 for (i = 0; i < 6; i++) {
6372 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6373 + (i * sizeof(struct tg3_rx_buffer_desc));
6374 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6376 readl(rxd + 0x0), readl(rxd + 0x4),
6377 readl(rxd + 0x8), readl(rxd + 0xc));
6378 rxd += (4 * sizeof(u32));
6379 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6381 readl(rxd + 0x0), readl(rxd + 0x4),
6382 readl(rxd + 0x8), readl(rxd + 0xc));
6385 for (i = 0; i < 6; i++) {
6388 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6389 + (i * sizeof(struct tg3_rx_buffer_desc));
6390 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6392 readl(rxd + 0x0), readl(rxd + 0x4),
6393 readl(rxd + 0x8), readl(rxd + 0xc));
6394 rxd += (4 * sizeof(u32));
6395 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6397 readl(rxd + 0x0), readl(rxd + 0x4),
6398 readl(rxd + 0x8), readl(rxd + 0xc));
6403 static struct net_device_stats *tg3_get_stats(struct net_device *);
6404 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop entry point: quiesce the TX queue and timer, halt the
 * chip under the full lock, release the IRQ (and MSI vector if in use),
 * snapshot statistics into *_prev so counters survive a close/open
 * cycle, then free the DMA-consistent rings.
 */
6406 static int tg3_close(struct net_device *dev)
6408 struct tg3 *tp = netdev_priv(dev);
6410 netif_stop_queue(dev);
/* del_timer_sync waits for a running tg3_timer to finish. */
6412 del_timer_sync(&tp->timer);
6414 tg3_full_lock(tp, 1);
6419 tg3_disable_ints(tp);
6421 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6424 ~(TG3_FLAG_INIT_COMPLETE |
6425 TG3_FLAG_GOT_SERDES_FLOWCTL);
6426 netif_carrier_off(tp->dev);
6428 tg3_full_unlock(tp);
6430 free_irq(tp->pdev->irq, dev);
6431 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6432 pci_disable_msi(tp->pdev);
6433 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve cumulative stats across down/up transitions. */
6436 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6437 sizeof(tp->net_stats_prev));
6438 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6439 sizeof(tp->estats_prev));
6441 tg3_free_consistent(tp);
/* Collapse a 64-bit hardware stat (high/low pair) into an unsigned long;
 * on 32-bit kernels the pair is combined explicitly (64-bit branch
 * elided in this extract).
 */
6446 static inline unsigned long get_stat64(tg3_stat64_t *val)
6450 #if (BITS_PER_LONG == 32)
6453 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative receive CRC error count. On copper 5700/5701
 * the hardware FCS stat is supplemented by reading the PHY CRC counter
 * (register 0x14, exposed via the 0x1e shadow-enable write) under the
 * lock and accumulating into tp->phy_crc_errors; all other chips use
 * the MAC rx_fcs_errors statistic directly.
 */
6458 static unsigned long calc_crc_errors(struct tg3 *tp)
6460 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6462 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6463 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6467 spin_lock_bh(&tp->lock);
6468 if (!tg3_readphy(tp, 0x1e, &val)) {
6469 tg3_writephy(tp, 0x1e, val | 0x8000);
6470 tg3_readphy(tp, 0x14, &val);
6473 spin_unlock_bh(&tp->lock);
6475 tp->phy_crc_errors += val;
6477 return tp->phy_crc_errors;
6480 return get_stat64(&hw_stats->rx_fcs_errors);
/* Compute estats->member as the pre-reset snapshot (old_estats) plus the
 * live hardware counter; relies on estats/old_estats/hw_stats being in
 * scope at the expansion site (see tg3_get_estats).
 */
6483 #define ESTAT_ADD(member) \
6484 estats->member = old_estats->member + \
6485 get_stat64(&hw_stats->member)
/* Build the ethtool statistics block: for every counter, add the live
 * 64-bit hardware value to the snapshot saved at the last close
 * (tp->estats_prev) so totals persist across down/up cycles. The
 * trailing return of &tp->estats is elided in this extract.
 */
6487 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6489 struct tg3_ethtool_stats *estats = &tp->estats;
6490 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6491 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6496 ESTAT_ADD(rx_octets);
6497 ESTAT_ADD(rx_fragments);
6498 ESTAT_ADD(rx_ucast_packets);
6499 ESTAT_ADD(rx_mcast_packets);
6500 ESTAT_ADD(rx_bcast_packets);
6501 ESTAT_ADD(rx_fcs_errors);
6502 ESTAT_ADD(rx_align_errors);
6503 ESTAT_ADD(rx_xon_pause_rcvd);
6504 ESTAT_ADD(rx_xoff_pause_rcvd);
6505 ESTAT_ADD(rx_mac_ctrl_rcvd);
6506 ESTAT_ADD(rx_xoff_entered);
6507 ESTAT_ADD(rx_frame_too_long_errors);
6508 ESTAT_ADD(rx_jabbers);
6509 ESTAT_ADD(rx_undersize_packets);
6510 ESTAT_ADD(rx_in_length_errors);
6511 ESTAT_ADD(rx_out_length_errors);
6512 ESTAT_ADD(rx_64_or_less_octet_packets);
6513 ESTAT_ADD(rx_65_to_127_octet_packets);
6514 ESTAT_ADD(rx_128_to_255_octet_packets);
6515 ESTAT_ADD(rx_256_to_511_octet_packets);
6516 ESTAT_ADD(rx_512_to_1023_octet_packets);
6517 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6518 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6519 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6520 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6521 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6523 ESTAT_ADD(tx_octets);
6524 ESTAT_ADD(tx_collisions);
6525 ESTAT_ADD(tx_xon_sent);
6526 ESTAT_ADD(tx_xoff_sent);
6527 ESTAT_ADD(tx_flow_control);
6528 ESTAT_ADD(tx_mac_errors);
6529 ESTAT_ADD(tx_single_collisions);
6530 ESTAT_ADD(tx_mult_collisions);
6531 ESTAT_ADD(tx_deferred);
6532 ESTAT_ADD(tx_excessive_collisions);
6533 ESTAT_ADD(tx_late_collisions);
6534 ESTAT_ADD(tx_collide_2times);
6535 ESTAT_ADD(tx_collide_3times);
6536 ESTAT_ADD(tx_collide_4times);
6537 ESTAT_ADD(tx_collide_5times);
6538 ESTAT_ADD(tx_collide_6times);
6539 ESTAT_ADD(tx_collide_7times);
6540 ESTAT_ADD(tx_collide_8times);
6541 ESTAT_ADD(tx_collide_9times);
6542 ESTAT_ADD(tx_collide_10times);
6543 ESTAT_ADD(tx_collide_11times);
6544 ESTAT_ADD(tx_collide_12times);
6545 ESTAT_ADD(tx_collide_13times);
6546 ESTAT_ADD(tx_collide_14times);
6547 ESTAT_ADD(tx_collide_15times);
6548 ESTAT_ADD(tx_ucast_packets);
6549 ESTAT_ADD(tx_mcast_packets);
6550 ESTAT_ADD(tx_bcast_packets);
6551 ESTAT_ADD(tx_carrier_sense_errors);
6552 ESTAT_ADD(tx_discards);
6553 ESTAT_ADD(tx_errors);
6555 ESTAT_ADD(dma_writeq_full);
6556 ESTAT_ADD(dma_write_prioq_full);
6557 ESTAT_ADD(rxbds_empty);
6558 ESTAT_ADD(rx_discards);
6559 ESTAT_ADD(rx_errors);
6560 ESTAT_ADD(rx_threshold_hit);
6562 ESTAT_ADD(dma_readq_full);
6563 ESTAT_ADD(dma_read_prioq_full);
6564 ESTAT_ADD(tx_comp_queue_full);
6566 ESTAT_ADD(ring_set_send_prod_index);
6567 ESTAT_ADD(ring_status_update);
6568 ESTAT_ADD(nic_irqs);
6569 ESTAT_ADD(nic_avoided_irqs);
6570 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device->get_stats hook: build the standard net_device_stats view
 * from the hardware stats block.  As with tg3_get_estats(), each field
 * is the value snapshotted at the last reset (net_stats_prev) plus the
 * current hardware counter, so totals survive chip resets.
 * Returns a pointer to tp->net_stats (driver-owned storage).
 */
6575 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6577 struct tg3 *tp = netdev_priv(dev);
6578 struct net_device_stats *stats = &tp->net_stats;
6579 struct net_device_stats *old_stats = &tp->net_stats_prev;
6580 struct tg3_hw_stats *hw_stats = tp->hw_stats;
	/* packet totals are the sum of the unicast/multicast/broadcast HW counters */
6585 stats->rx_packets = old_stats->rx_packets +
6586 get_stat64(&hw_stats->rx_ucast_packets) +
6587 get_stat64(&hw_stats->rx_mcast_packets) +
6588 get_stat64(&hw_stats->rx_bcast_packets);
6590 stats->tx_packets = old_stats->tx_packets +
6591 get_stat64(&hw_stats->tx_ucast_packets) +
6592 get_stat64(&hw_stats->tx_mcast_packets) +
6593 get_stat64(&hw_stats->tx_bcast_packets);
6595 stats->rx_bytes = old_stats->rx_bytes +
6596 get_stat64(&hw_stats->rx_octets);
6597 stats->tx_bytes = old_stats->tx_bytes +
6598 get_stat64(&hw_stats->tx_octets);
6600 stats->rx_errors = old_stats->rx_errors +
6601 get_stat64(&hw_stats->rx_errors) +
6602 get_stat64(&hw_stats->rx_discards);
6603 stats->tx_errors = old_stats->tx_errors +
6604 get_stat64(&hw_stats->tx_errors) +
6605 get_stat64(&hw_stats->tx_mac_errors) +
6606 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6607 get_stat64(&hw_stats->tx_discards);
6609 stats->multicast = old_stats->multicast +
6610 get_stat64(&hw_stats->rx_mcast_packets);
6611 stats->collisions = old_stats->collisions +
6612 get_stat64(&hw_stats->tx_collisions);
6614 stats->rx_length_errors = old_stats->rx_length_errors +
6615 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6616 get_stat64(&hw_stats->rx_undersize_packets);
	/* "over" errors = times the RX BD ring ran dry */
6618 stats->rx_over_errors = old_stats->rx_over_errors +
6619 get_stat64(&hw_stats->rxbds_empty);
6620 stats->rx_frame_errors = old_stats->rx_frame_errors +
6621 get_stat64(&hw_stats->rx_align_errors);
6622 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6623 get_stat64(&hw_stats->tx_discards);
6624 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6625 get_stat64(&hw_stats->tx_carrier_sense_errors);
	/* CRC errors need the PHY-counter special case on 5700/5701 */
6627 stats->rx_crc_errors = old_stats->rx_crc_errors +
6628 calc_crc_errors(tp);
/* Bitwise CRC-32 over 'buf' (len bytes), used to compute the multicast
 * hash-filter bit for an ethernet address: outer loop walks the bytes,
 * inner loop processes the 8 bits of each byte.
 * NOTE(review): polynomial/reflection details are in elided lines --
 * confirm against the full source before relying on the exact value.
 */
6633 static inline u32 calc_crc(unsigned char *buf, int len)
6641 for (j = 0; j < len; j++) {
6644 for (k = 0; k < 8; k++) {
6658 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6660 /* accept or reject all multicast frames */
6661 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6662 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6663 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6664 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute the MAC RX mode (promiscuous / all-multicast / hash filter /
 * VLAN tag stripping) from dev->flags and the multicast list.
 * Caller must hold the driver locks (see tg3_set_rx_mode()).
 */
6667 static void __tg3_set_rx_mode(struct net_device *dev)
6669 struct tg3 *tp = netdev_priv(dev);
	/* start from the current mode with the bits we manage cleared */
6672 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6673 RX_MODE_KEEP_VLAN_TAG);
6675 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6678 #if TG3_VLAN_TAG_USED
6680 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6681 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6683 /* By definition, VLAN is disabled always in this
6686 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6687 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6690 if (dev->flags & IFF_PROMISC) {
6691 /* Promiscuous mode. */
6692 rx_mode |= RX_MODE_PROMISC;
6693 } else if (dev->flags & IFF_ALLMULTI) {
6694 /* Accept all multicast. */
6695 tg3_set_multi (tp, 1);
6696 } else if (dev->mc_count < 1) {
6697 /* Reject all multicast. */
6698 tg3_set_multi (tp, 0);
6700 /* Accept one or more multicast(s). */
6701 struct dev_mc_list *mclist;
6703 u32 mc_filter[4] = { 0, };
	/* hash each multicast address into one bit of the 128-bit filter */
6708 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6709 i++, mclist = mclist->next) {
6711 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
		/* bits 6:5 of the hash select the register, low bits the bit */
6713 regidx = (bit & 0x60) >> 5;
6715 mc_filter[regidx] |= (1 << bit);
6718 tw32(MAC_HASH_REG_0, mc_filter[0]);
6719 tw32(MAC_HASH_REG_1, mc_filter[1]);
6720 tw32(MAC_HASH_REG_2, mc_filter[2]);
6721 tw32(MAC_HASH_REG_3, mc_filter[3]);
	/* only touch the hardware RX mode register if something changed */
6724 if (rx_mode != tp->rx_mode) {
6725 tp->rx_mode = rx_mode;
6726 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device->set_multicast_list hook: take the full driver lock and
 * delegate to __tg3_set_rx_mode(), which does the real work.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
/* Size in bytes of the buffer ethtool must supply for a register dump. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
/* ethtool get_regs hook: dump the commonly useful chip registers into
 * the caller-supplied TG3_REGDUMP_LEN buffer.  Each register value is
 * written at the buffer offset equal to its register offset, so the
 * dump is sparse; unread ranges stay zero from the memset.
 */
6747 static void tg3_get_regs(struct net_device *dev,
6748 struct ethtool_regs *regs, void *_p)
6751 struct tg3 *tp = netdev_priv(dev);
6757 memset(p, 0, TG3_REGDUMP_LEN);
	/* hold the full lock so the chip state is stable while dumping */
6759 tg3_full_lock(tp, 0);
6761 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
6762 #define GET_REG32_LOOP(base,len) \
6763 do { p = (u32 *)(orig_p + (base)); \
6764 for (i = 0; i < len; i += 4) \
6765 __GET_REG32((base) + i); \
6767 #define GET_REG32_1(reg) \
6768 do { p = (u32 *)(orig_p + (reg)); \
6769 __GET_REG32((reg)); \
	/* one GET_REG32_* per contiguous register bank */
6772 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6773 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6774 GET_REG32_LOOP(MAC_MODE, 0x4f0);
6775 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6776 GET_REG32_1(SNDDATAC_MODE);
6777 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6778 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6779 GET_REG32_1(SNDBDC_MODE);
6780 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6781 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6782 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6783 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6784 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6785 GET_REG32_1(RCVDCC_MODE);
6786 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6787 GET_REG32_LOOP(RCVCC_MODE, 0x14);
6788 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6789 GET_REG32_1(MBFREE_MODE);
6790 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6791 GET_REG32_LOOP(MEMARB_MODE, 0x10);
6792 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6793 GET_REG32_LOOP(RDMAC_MODE, 0x08);
6794 GET_REG32_LOOP(WDMAC_MODE, 0x08);
6795 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6796 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6797 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6798 GET_REG32_LOOP(FTQ_RESET, 0x120);
6799 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6800 GET_REG32_1(DMAC_MODE);
6801 GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist when the NVRAM interface is present */
6802 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6803 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6806 #undef GET_REG32_LOOP
6809 tg3_full_unlock(tp);
6812 static int tg3_get_eeprom_len(struct net_device *dev)
6814 struct tg3 *tp = netdev_priv(dev);
6816 return tp->nvram_size;
6819 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
/* ethtool get_eeprom hook: copy eeprom->len bytes starting at
 * eeprom->offset out of NVRAM into 'data'.  NVRAM is read in 32-bit
 * words, so the transfer is split into an unaligned head, an aligned
 * middle, and an unaligned tail.  Returns 0 or a tg3_nvram_read error.
 */
6821 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6823 struct tg3 *tp = netdev_priv(dev);
6826 u32 i, offset, len, val, b_offset, b_count;
6828 offset = eeprom->offset;
6832 eeprom->magic = TG3_EEPROM_MAGIC;
6835 /* adjustments to start on required 4 byte boundary */
6836 b_offset = offset & 3;
6837 b_count = 4 - b_offset;
6838 if (b_count > len) {
6839 /* i.e. offset=1 len=2 */
	/* head: read the whole containing word, copy only the wanted bytes */
6842 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6845 val = cpu_to_le32(val);
6846 memcpy(data, ((char*)&val) + b_offset, b_count);
6849 eeprom->len += b_count;
6852 /* read bytes upto the last 4 byte boundary */
6853 pd = &data[eeprom->len];
6854 for (i = 0; i < (len - (len & 3)); i += 4) {
6855 ret = tg3_nvram_read(tp, offset + i, &val);
	/* NVRAM words are kept little-endian in the caller's buffer */
6860 val = cpu_to_le32(val);
6861 memcpy(pd + i, &val, 4);
6866 /* read last bytes not ending on 4 byte boundary */
6867 pd = &data[eeprom->len];
6869 b_offset = offset + len - b_count;
6870 ret = tg3_nvram_read(tp, b_offset, &val);
6873 val = cpu_to_le32(val);
6874 memcpy(pd, ((char*)&val), b_count);
6875 eeprom->len += b_count;
6880 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom hook: write eeprom->len bytes at eeprom->offset.
 * NVRAM writes are word-granular, so when the request is unaligned the
 * bordering words are read back first and merged into a temporary
 * buffer before the block write.  Rejects requests whose magic does
 * not match TG3_EEPROM_MAGIC.
 */
6882 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6884 struct tg3 *tp = netdev_priv(dev);
6886 u32 offset, len, b_offset, odd_len, start, end;
6889 if (eeprom->magic != TG3_EEPROM_MAGIC)
6892 offset = eeprom->offset;
6895 if ((b_offset = (offset & 3))) {
6896 /* adjustments to start on required 4 byte boundary */
	/* preserve the bytes before 'offset' inside the first word */
6897 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6900 start = cpu_to_le32(start);
6909 /* adjustments to end on required 4 byte boundary */
6911 len = (len + 3) & ~3;
	/* preserve the bytes after the request inside the last word */
6912 ret = tg3_nvram_read(tp, offset+len-4, &end);
6915 end = cpu_to_le32(end);
6919 if (b_offset || odd_len) {
6920 buf = kmalloc(len, GFP_KERNEL);
	/* merge: saved first word, saved last word, then the user data */
6924 memcpy(buf, &start, 4);
6926 memcpy(buf+len-4, &end, 4);
6927 memcpy(buf + b_offset, data, eeprom->len);
6930 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings hook: report supported/advertised link modes and
 * the currently active speed/duplex/autoneg state from link_config.
 */
6938 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6940 struct tg3 *tp = netdev_priv(dev);
6942 cmd->supported = (SUPPORTED_Autoneg);
	/* gigabit modes unless the board is 10/100-only */
6944 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6945 cmd->supported |= (SUPPORTED_1000baseT_Half |
6946 SUPPORTED_1000baseT_Full);
	/* copper PHYs add the 10/100 modes; SerDes boards report FIBRE */
6948 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6949 cmd->supported |= (SUPPORTED_100baseT_Half |
6950 SUPPORTED_100baseT_Full |
6951 SUPPORTED_10baseT_Half |
6952 SUPPORTED_10baseT_Full |
6955 cmd->supported |= SUPPORTED_FIBRE;
6957 cmd->advertising = tp->link_config.advertising;
	/* active speed/duplex are only meaningful while the device is up */
6958 if (netif_running(dev)) {
6959 cmd->speed = tp->link_config.active_speed;
6960 cmd->duplex = tp->link_config.active_duplex;
6963 cmd->phy_address = PHY_ADDR;
6964 cmd->transceiver = 0;
6965 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings hook: validate and apply new link parameters.
 * With autoneg enabled the advertised mask is stored and speed/duplex
 * are invalidated; with autoneg off the forced speed/duplex are stored.
 * The PHY is reprogrammed immediately if the interface is up.
 */
6971 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6973 struct tg3 *tp = netdev_priv(dev);
6975 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6976 /* These are the only valid advertisement bits allowed. */
6977 if (cmd->autoneg == AUTONEG_ENABLE &&
6978 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6979 ADVERTISED_1000baseT_Full |
6980 ADVERTISED_Autoneg |
	/* link_config is protected by the full driver lock */
6985 tg3_full_lock(tp, 0);
6987 tp->link_config.autoneg = cmd->autoneg;
6988 if (cmd->autoneg == AUTONEG_ENABLE) {
6989 tp->link_config.advertising = cmd->advertising;
6990 tp->link_config.speed = SPEED_INVALID;
6991 tp->link_config.duplex = DUPLEX_INVALID;
6993 tp->link_config.advertising = 0;
6994 tp->link_config.speed = cmd->speed;
6995 tp->link_config.duplex = cmd->duplex;
6998 if (netif_running(dev))
6999 tg3_setup_phy(tp, 1);
7001 tg3_full_unlock(tp);
7006 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7008 struct tg3 *tp = netdev_priv(dev);
7010 strcpy(info->driver, DRV_MODULE_NAME);
7011 strcpy(info->version, DRV_MODULE_VERSION);
7012 strcpy(info->bus_info, pci_name(tp->pdev));
7015 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7017 struct tg3 *tp = netdev_priv(dev);
7019 wol->supported = WAKE_MAGIC;
7021 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7022 wol->wolopts = WAKE_MAGIC;
7023 memset(&wol->sopass, 0, sizeof(wol->sopass));
7026 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7028 struct tg3 *tp = netdev_priv(dev);
7030 if (wol->wolopts & ~WAKE_MAGIC)
7032 if ((wol->wolopts & WAKE_MAGIC) &&
7033 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7034 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7037 spin_lock_bh(&tp->lock);
7038 if (wol->wolopts & WAKE_MAGIC)
7039 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7041 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7042 spin_unlock_bh(&tp->lock);
7047 static u32 tg3_get_msglevel(struct net_device *dev)
7049 struct tg3 *tp = netdev_priv(dev);
7050 return tp->msg_enable;
7053 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7055 struct tg3 *tp = netdev_priv(dev);
7056 tp->msg_enable = value;
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook: chips without TSO capability may only have the
 * feature switched off; asking to enable it there is rejected.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
/* ethtool nway_reset hook: restart PHY autonegotiation by setting
 * BMCR_ANRESTART, but only if the interface is up and autoneg is
 * currently enabled in BMCR.
 */
7073 static int tg3_nway_reset(struct net_device *dev)
7075 struct tg3 *tp = netdev_priv(dev);
7079 if (!netif_running(dev))
7082 spin_lock_bh(&tp->lock);
	/* deliberate double read: the first read flushes any latched
	 * status so the second returns the current BMCR value */
7084 tg3_readphy(tp, MII_BMCR, &bmcr);
7085 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7086 (bmcr & BMCR_ANENABLE)) {
7087 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7090 spin_unlock_bh(&tp->lock);
7095 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7097 struct tg3 *tp = netdev_priv(dev);
7099 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7100 ering->rx_mini_max_pending = 0;
7101 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7103 ering->rx_pending = tp->rx_pending;
7104 ering->rx_mini_pending = 0;
7105 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7106 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam hook: validate the requested ring sizes, store
 * them, and if the interface is up, halt and restart the chip so the
 * new ring sizes take effect.
 */
7109 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7111 struct tg3 *tp = netdev_priv(dev);
	/* reject anything beyond the hardware ring capacity */
7114 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7115 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7116 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7119 if (netif_running(dev)) {
7124 tg3_full_lock(tp, irq_sync);
7126 tp->rx_pending = ering->rx_pending;
	/* some chips (MAX_RXPEND_64) cap the standard RX ring at 64 */
7128 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7129 tp->rx_pending > 63)
7130 tp->rx_pending = 63;
7131 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7132 tp->tx_pending = ering->tx_pending;
	/* restart the chip so the new ring sizes are programmed */
7134 if (netif_running(dev)) {
7135 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7137 tg3_netif_start(tp);
7140 tg3_full_unlock(tp);
7145 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7147 struct tg3 *tp = netdev_priv(dev);
7149 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7150 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7151 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam hook: update the pause autoneg/rx/tx flags and
 * restart the chip (if up) so flow control is reprogrammed.
 */
7154 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7156 struct tg3 *tp = netdev_priv(dev);
7159 if (netif_running(dev)) {
7164 tg3_full_lock(tp, irq_sync);
7166 if (epause->autoneg)
7167 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7169 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7170 if (epause->rx_pause)
7171 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7173 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7174 if (epause->tx_pause)
7175 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7177 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
	/* full restart applies the new flow-control configuration */
7179 if (netif_running(dev)) {
7180 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7182 tg3_netif_start(tp);
7185 tg3_full_unlock(tp);
7190 static u32 tg3_get_rx_csum(struct net_device *dev)
7192 struct tg3 *tp = netdev_priv(dev);
7193 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7196 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7198 struct tg3 *tp = netdev_priv(dev);
7200 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7206 spin_lock_bh(&tp->lock);
7208 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7210 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7211 spin_unlock_bh(&tp->lock);
7216 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7218 struct tg3 *tp = netdev_priv(dev);
7220 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7227 dev->features |= NETIF_F_IP_CSUM;
7229 dev->features &= ~NETIF_F_IP_CSUM;
7234 static int tg3_get_stats_count (struct net_device *dev)
7236 return TG3_NUM_STATS;
7239 static int tg3_get_test_count (struct net_device *dev)
7241 return TG3_NUM_TEST;
7244 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7246 switch (stringset) {
7248 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
7251 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
7254 WARN_ON(1); /* we need a WARN() */
7259 static void tg3_get_ethtool_stats (struct net_device *dev,
7260 struct ethtool_stats *estats, u64 *tmp_stats)
7262 struct tg3 *tp = netdev_priv(dev);
7263 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7266 #define NVRAM_TEST_SIZE 0x100
/* Self-test: read the first NVRAM_TEST_SIZE bytes of NVRAM and verify
 * the magic word plus the bootstrap and manufacturing-block checksums.
 * Returns 0 on success, negative on read failure or checksum mismatch.
 */
7268 static int tg3_test_nvram(struct tg3 *tp)
7273 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
	/* pull the region in 32-bit words */
7277 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7280 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7282 buf[j] = cpu_to_le32(val);
7284 if (i < NVRAM_TEST_SIZE)
	/* word 0 must carry the tg3 EEPROM magic */
7288 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7291 /* Bootstrap checksum at offset 0x10 */
7292 csum = calc_crc((unsigned char *) buf, 0x10);
7293 if(csum != cpu_to_le32(buf[0x10/4]))
7296 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7297 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7298 if (csum != cpu_to_le32(buf[0xfc/4]))
/* Seconds to wait for link during the self-test, per PHY type. */
7308 #define TG3_SERDES_TIMEOUT_SEC 2
7309 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: poll once per second for carrier, up to the PHY-type
 * timeout.  Succeeds as soon as netif_carrier_ok() reports link;
 * an interrupted sleep aborts the wait early.
 */
7311 static int tg3_test_link(struct tg3 *tp)
7315 if (!netif_running(tp->dev))
7318 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7319 max = TG3_SERDES_TIMEOUT_SEC;
7321 max = TG3_COPPER_TIMEOUT_SEC;
7323 for (i = 0; i < max; i++) {
7324 if (netif_carrier_ok(tp->dev))
	/* non-zero return means the sleep was interrupted by a signal */
7327 if (msleep_interruptible(1000))
7334 /* Only test the commonly used registers */
/* Self-test: for each table entry, save the register, write all-zeroes
 * then all-ones masked by read_mask|write_mask, and verify that
 * read-only bits never change while read/write bits take the written
 * value.  The original value is restored afterwards.  Entries are
 * filtered by chip family via the TG3_FL_* flags.
 */
7335 static int tg3_test_registers(struct tg3 *tp)
7338 u32 offset, read_mask, write_mask, val, save_val, read_val;
	/* entry-selection flags: which chip families an entry applies to */
7342 #define TG3_FL_5705 0x1
7343 #define TG3_FL_NOT_5705 0x2
7344 #define TG3_FL_NOT_5788 0x4
	/* { offset, flags, read-only mask, read/write mask } */
7348 /* MAC Control Registers */
7349 { MAC_MODE, TG3_FL_NOT_5705,
7350 0x00000000, 0x00ef6f8c },
7351 { MAC_MODE, TG3_FL_5705,
7352 0x00000000, 0x01ef6b8c },
7353 { MAC_STATUS, TG3_FL_NOT_5705,
7354 0x03800107, 0x00000000 },
7355 { MAC_STATUS, TG3_FL_5705,
7356 0x03800100, 0x00000000 },
7357 { MAC_ADDR_0_HIGH, 0x0000,
7358 0x00000000, 0x0000ffff },
7359 { MAC_ADDR_0_LOW, 0x0000,
7360 0x00000000, 0xffffffff },
7361 { MAC_RX_MTU_SIZE, 0x0000,
7362 0x00000000, 0x0000ffff },
7363 { MAC_TX_MODE, 0x0000,
7364 0x00000000, 0x00000070 },
7365 { MAC_TX_LENGTHS, 0x0000,
7366 0x00000000, 0x00003fff },
7367 { MAC_RX_MODE, TG3_FL_NOT_5705,
7368 0x00000000, 0x000007fc },
7369 { MAC_RX_MODE, TG3_FL_5705,
7370 0x00000000, 0x000007dc },
7371 { MAC_HASH_REG_0, 0x0000,
7372 0x00000000, 0xffffffff },
7373 { MAC_HASH_REG_1, 0x0000,
7374 0x00000000, 0xffffffff },
7375 { MAC_HASH_REG_2, 0x0000,
7376 0x00000000, 0xffffffff },
7377 { MAC_HASH_REG_3, 0x0000,
7378 0x00000000, 0xffffffff },
7380 /* Receive Data and Receive BD Initiator Control Registers. */
7381 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7382 0x00000000, 0xffffffff },
7383 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7384 0x00000000, 0xffffffff },
7385 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7386 0x00000000, 0x00000003 },
7387 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7388 0x00000000, 0xffffffff },
7389 { RCVDBDI_STD_BD+0, 0x0000,
7390 0x00000000, 0xffffffff },
7391 { RCVDBDI_STD_BD+4, 0x0000,
7392 0x00000000, 0xffffffff },
7393 { RCVDBDI_STD_BD+8, 0x0000,
7394 0x00000000, 0xffff0002 },
7395 { RCVDBDI_STD_BD+0xc, 0x0000,
7396 0x00000000, 0xffffffff },
7398 /* Receive BD Initiator Control Registers. */
7399 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7400 0x00000000, 0xffffffff },
7401 { RCVBDI_STD_THRESH, TG3_FL_5705,
7402 0x00000000, 0x000003ff },
7403 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7404 0x00000000, 0xffffffff },
7406 /* Host Coalescing Control Registers. */
7407 { HOSTCC_MODE, TG3_FL_NOT_5705,
7408 0x00000000, 0x00000004 },
7409 { HOSTCC_MODE, TG3_FL_5705,
7410 0x00000000, 0x000000f6 },
7411 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7412 0x00000000, 0xffffffff },
7413 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7414 0x00000000, 0x000003ff },
7415 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7416 0x00000000, 0xffffffff },
7417 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7418 0x00000000, 0x000003ff },
7419 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7420 0x00000000, 0xffffffff },
7421 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7422 0x00000000, 0x000000ff },
7423 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7424 0x00000000, 0xffffffff },
7425 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7426 0x00000000, 0x000000ff },
7427 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7428 0x00000000, 0xffffffff },
7429 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7430 0x00000000, 0xffffffff },
7431 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7432 0x00000000, 0xffffffff },
7433 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7434 0x00000000, 0x000000ff },
7435 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7436 0x00000000, 0xffffffff },
7437 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7438 0x00000000, 0x000000ff },
7439 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7440 0x00000000, 0xffffffff },
7441 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7442 0x00000000, 0xffffffff },
7443 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7444 0x00000000, 0xffffffff },
7445 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7446 0x00000000, 0xffffffff },
7447 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7448 0x00000000, 0xffffffff },
7449 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7450 0xffffffff, 0x00000000 },
7451 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7452 0xffffffff, 0x00000000 },
7454 /* Buffer Manager Control Registers. */
7455 { BUFMGR_MB_POOL_ADDR, 0x0000,
7456 0x00000000, 0x007fff80 },
7457 { BUFMGR_MB_POOL_SIZE, 0x0000,
7458 0x00000000, 0x007fffff },
7459 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7460 0x00000000, 0x0000003f },
7461 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7462 0x00000000, 0x000001ff },
7463 { BUFMGR_MB_HIGH_WATER, 0x0000,
7464 0x00000000, 0x000001ff },
7465 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7466 0xffffffff, 0x00000000 },
7467 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7468 0xffffffff, 0x00000000 },
7470 /* Mailbox Registers */
7471 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7472 0x00000000, 0x000001ff },
7473 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7474 0x00000000, 0x000001ff },
7475 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7476 0x00000000, 0x000007ff },
7477 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7478 0x00000000, 0x000001ff },
	/* sentinel terminating the table */
7480 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7483 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7488 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
	/* skip entries that do not apply to this chip family */
7489 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7492 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7495 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7496 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7499 offset = (u32) reg_tbl[i].offset;
7500 read_mask = reg_tbl[i].read_mask;
7501 write_mask = reg_tbl[i].write_mask;
7503 /* Save the original register content */
7504 save_val = tr32(offset);
7506 /* Determine the read-only value. */
7507 read_val = save_val & read_mask;
7509 /* Write zero to the register, then make sure the read-only bits
7510 * are not changed and the read/write bits are all zeros.
7516 /* Test the read-only and read/write bits. */
7517 if (((val & read_mask) != read_val) || (val & write_mask))
7520 /* Write ones to all the bits defined by RdMask and WrMask, then
7521 * make sure the read-only bits are not changed and the
7522 * read/write bits are all ones.
7524 tw32(offset, read_mask | write_mask);
7528 /* Test the read-only bits. */
7529 if ((val & read_mask) != read_val)
7532 /* Test the read/write bits. */
7533 if ((val & write_mask) != write_mask)
	/* restore the original contents before moving on */
7536 tw32(offset, save_val);
	/* failure path: report the offending register and restore it */
7542 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7543 tw32(offset, save_val);
/* Self-test helper: write each pattern (zeroes, ones, 0xaa55a55a) to
 * every 32-bit word of NIC-local memory in [offset, offset+len) and
 * read it back, failing on the first mismatch.
 */
7547 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7549 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7553 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7554 for (j = 0; j < len; j += 4) {
7557 tg3_write_mem(tp, offset + j, test_pattern[i]);
7558 tg3_read_mem(tp, offset + j, &val);
7559 if (val != test_pattern[i])
/* Self-test: run tg3_do_mem_test() over every internal memory region of
 * the chip.  5705-and-newer chips have a different memory map, so a
 * separate table (mem_tbl_5705) is used for them.  Each table ends with
 * an offset of 0xffffffff as a sentinel.
 */
7566 static int tg3_test_memory(struct tg3 *tp)
7568 static struct mem_entry {
7571 } mem_tbl_570x[] = {
7572 { 0x00000000, 0x01000},
7573 { 0x00002000, 0x1c000},
7574 { 0xffffffff, 0x00000}
7575 }, mem_tbl_5705[] = {
7576 { 0x00000100, 0x0000c},
7577 { 0x00000200, 0x00008},
7578 { 0x00000b50, 0x00400},
7579 { 0x00004000, 0x00800},
7580 { 0x00006000, 0x01000},
7581 { 0x00008000, 0x02000},
7582 { 0x00010000, 0x0e000},
7583 { 0xffffffff, 0x00000}
7585 struct mem_entry *mem_tbl;
	/* pick the memory map matching the chip generation */
7589 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7590 mem_tbl = mem_tbl_5705;
7592 mem_tbl = mem_tbl_570x;
7594 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7595 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7596 mem_tbl[i].len)) != 0)
/* Self-test: internal MAC loopback.  Puts the MAC in internal-loopback
 * GMII mode, transmits one self-addressed frame with a known byte
 * pattern, polls for it to complete TX and appear on the RX return
 * ring, then verifies the descriptor status and the payload bytes.
 */
7603 static int tg3_test_loopback(struct tg3 *tp)
7605 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7607 struct sk_buff *skb, *rx_skb;
7610 int num_pkts, tx_len, rx_len, i, err;
7611 struct tg3_rx_buffer_desc *desc;
7613 if (!netif_running(tp->dev))
7618 tg3_abort_hw(tp, 1);
	/* internal loopback: frames loop inside the MAC, no wire needed */
7622 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7623 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7624 MAC_MODE_PORT_MODE_GMII;
7625 tw32(MAC_MODE, mac_mode);
	/* build the test frame: dst = own MAC, then a counting pattern */
7628 skb = dev_alloc_skb(tx_len);
7629 tx_data = skb_put(skb, tx_len);
7630 memcpy(tx_data, tp->dev->dev_addr, 6);
7631 memset(tx_data + 6, 0x0, 8);
7633 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7635 for (i = 14; i < tx_len; i++)
7636 tx_data[i] = (u8) (i & 0xff);
7638 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7640 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	/* remember where the RX producer was before sending */
7645 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7650 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
	/* kick the send mailbox and read it back to flush the write */
7655 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7656 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
	/* poll up to 10 times for the frame to round-trip */
7660 for (i = 0; i < 10; i++) {
7661 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7666 tx_idx = tp->hw_status->idx[0].tx_consumer;
7667 rx_idx = tp->hw_status->idx[0].rx_producer;
7668 if ((tx_idx == send_idx) &&
7669 (rx_idx == (rx_start_idx + num_pkts)))
7673 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7676 if (tx_idx != send_idx)
7679 if (rx_idx != rx_start_idx + num_pkts)
	/* inspect the RX return descriptor for our frame */
7682 desc = &tp->rx_rcb[rx_start_idx];
7683 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7684 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7685 if (opaque_key != RXD_OPAQUE_RING_STD)
7688 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7689 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
	/* length check: received length minus 4-byte FCS must match */
7692 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7693 if (rx_len != tx_len)
7696 rx_skb = tp->rx_std_buffers[desc_idx].skb;
7698 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7699 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
	/* verify the payload pattern byte for byte */
7701 for (i = 14; i < tx_len; i++) {
7702 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7707 /* tg3_free_rings will unmap and free the rx_skb */
/* ethtool self_test hook: run the NVRAM and link tests always; when
 * ETH_TEST_FL_OFFLINE is requested, additionally halt the chip and run
 * the register, memory, loopback and interrupt tests, then restart the
 * device.  Per-test results go into data[]; any failure also sets
 * ETH_TEST_FL_FAILED in etest->flags.
 */
7712 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7715 struct tg3 *tp = netdev_priv(dev);
7717 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7719 if (tg3_test_nvram(tp) != 0) {
7720 etest->flags |= ETH_TEST_FL_FAILED;
7723 if (tg3_test_link(tp) != 0) {
7724 etest->flags |= ETH_TEST_FL_FAILED;
	/* offline tests disturb the hardware, so stop it first */
7727 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7730 if (netif_running(dev)) {
7735 tg3_full_lock(tp, irq_sync);
7737 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7739 tg3_halt_cpu(tp, RX_CPU_BASE);
	/* only pre-5705 chips have a separate TX CPU to halt */
7740 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7741 tg3_halt_cpu(tp, TX_CPU_BASE);
7742 tg3_nvram_unlock(tp);
7744 if (tg3_test_registers(tp) != 0) {
7745 etest->flags |= ETH_TEST_FL_FAILED;
7748 if (tg3_test_memory(tp) != 0) {
7749 etest->flags |= ETH_TEST_FL_FAILED;
7752 if (tg3_test_loopback(tp) != 0) {
7753 etest->flags |= ETH_TEST_FL_FAILED;
	/* interrupt test needs the lock dropped (it waits for an IRQ) */
7757 tg3_full_unlock(tp);
7759 if (tg3_test_interrupt(tp) != 0) {
7760 etest->flags |= ETH_TEST_FL_FAILED;
7764 tg3_full_lock(tp, 0);
	/* bring the chip back to its normal operating state */
7766 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7767 if (netif_running(dev)) {
7768 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7770 tg3_netif_start(tp);
7773 tg3_full_unlock(tp);
/* net_device->do_ioctl hook: implement the MII ioctls.  Visible cases
 * report the fixed PHY address, and read/write a PHY register under
 * tp->lock (SerDes boards have no MII PHY and fall through).
 * NOTE(review): the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG case labels are
 * in elided lines -- confirm against the full source.
 */
7777 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7779 struct mii_ioctl_data *data = if_mii(ifr);
7780 struct tg3 *tp = netdev_priv(dev);
7785 data->phy_id = PHY_ADDR;
7791 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7792 break; /* We have no PHY */
7794 spin_lock_bh(&tp->lock);
7795 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7796 spin_unlock_bh(&tp->lock);
7798 data->val_out = mii_regval;
7804 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7805 break; /* We have no PHY */
	/* writing PHY registers requires admin capability */
7807 if (!capable(CAP_NET_ADMIN))
7810 spin_lock_bh(&tp->lock);
7811 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7812 spin_unlock_bh(&tp->lock);
7823 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group (elided assignment)
 * and refresh the RX mode so VLAN tag stripping is (de)activated.
 */
7824 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7826 struct tg3 *tp = netdev_priv(dev);
7828 tg3_full_lock(tp, 0);
7832 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7833 __tg3_set_rx_mode(dev);
7835 tg3_full_unlock(tp);
/* VLAN acceleration hook: drop the per-VID device pointer under the
 * full lock so the RX path stops delivering frames for that VID.
 */
7838 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7840 struct tg3 *tp = netdev_priv(dev);
7842 tg3_full_lock(tp, 0);
7844 tp->vlgrp->vlan_devices[vid] = NULL;
7845 tg3_full_unlock(tp);
7849 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7851 struct tg3 *tp = netdev_priv(dev);
7853 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool set_coalesce hook: validate the requested interrupt
 * coalescing parameters against chip limits (5705+ chips do not
 * support the *_irq tick or stats-block parameters, so their limits
 * stay zero), store the relevant fields into tp->coal, and program
 * the hardware if the interface is running.
 */
7857 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7859 struct tg3 *tp = netdev_priv(dev);
7860 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
7861 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
	/* pre-5705 chips support the extra coalescing knobs */
7863 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7864 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
7865 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
7866 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
7867 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
7870 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
7871 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
7872 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
7873 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
7874 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
7875 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
7876 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
7877 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
7878 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
7879 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
7882 /* No rx interrupts will be generated if both are zero */
7883 if ((ec->rx_coalesce_usecs == 0) &&
7884 (ec->rx_max_coalesced_frames == 0))
7887 /* No tx interrupts will be generated if both are zero */
7888 if ((ec->tx_coalesce_usecs == 0) &&
7889 (ec->tx_max_coalesced_frames == 0))
7892 /* Only copy relevant parameters, ignore all others. */
7893 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
7894 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
7895 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
7896 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
7897 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
7898 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
7899 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
7900 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
7901 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
	/* push the new settings to the chip if it is live */
7903 if (netif_running(dev)) {
7904 tg3_full_lock(tp, 0);
7905 __tg3_set_coalesce(tp, &tp->coal);
7906 tg3_full_unlock(tp);
/* ethtool operations table for the tg3 driver.  Generic helpers
 * (ethtool_op_*) are used where no hardware-specific handling is needed;
 * TSO entries are compiled in only when TG3_TSO_SUPPORT is enabled.
 * NOTE(review): the closing #endif/brace lines appear stripped. */
7911 static struct ethtool_ops tg3_ethtool_ops = {
7912 .get_settings = tg3_get_settings,
7913 .set_settings = tg3_set_settings,
7914 .get_drvinfo = tg3_get_drvinfo,
7915 .get_regs_len = tg3_get_regs_len,
7916 .get_regs = tg3_get_regs,
7917 .get_wol = tg3_get_wol,
7918 .set_wol = tg3_set_wol,
7919 .get_msglevel = tg3_get_msglevel,
7920 .set_msglevel = tg3_set_msglevel,
7921 .nway_reset = tg3_nway_reset,
7922 .get_link = ethtool_op_get_link,
7923 .get_eeprom_len = tg3_get_eeprom_len,
7924 .get_eeprom = tg3_get_eeprom,
7925 .set_eeprom = tg3_set_eeprom,
7926 .get_ringparam = tg3_get_ringparam,
7927 .set_ringparam = tg3_set_ringparam,
7928 .get_pauseparam = tg3_get_pauseparam,
7929 .set_pauseparam = tg3_set_pauseparam,
7930 .get_rx_csum = tg3_get_rx_csum,
7931 .set_rx_csum = tg3_set_rx_csum,
7932 .get_tx_csum = ethtool_op_get_tx_csum,
7933 .set_tx_csum = tg3_set_tx_csum,
7934 .get_sg = ethtool_op_get_sg,
7935 .set_sg = ethtool_op_set_sg,
7936 #if TG3_TSO_SUPPORT != 0
7937 .get_tso = ethtool_op_get_tso,
7938 .set_tso = tg3_set_tso,
7940 .self_test_count = tg3_get_test_count,
7941 .self_test = tg3_self_test,
7942 .get_strings = tg3_get_strings,
7943 .get_stats_count = tg3_get_stats_count,
7944 .get_ethtool_stats = tg3_get_ethtool_stats,
7945 .get_coalesce = tg3_get_coalesce,
7946 .set_coalesce = tg3_set_coalesce,
/* Determine the size of a serial EEPROM by probing at increasing offsets
 * until the validation signature at offset 0 reads back again (address
 * wrap-around).  Falls back to the default EEPROM_CHIP_SIZE on any read
 * failure or missing magic.
 * NOTE(review): local declarations, the cursize doubling step and early
 * returns appear stripped by extraction — verify against upstream. */
7949 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7953 tp->nvram_size = EEPROM_CHIP_SIZE;
7955 if (tg3_nvram_read(tp, 0, &val) != 0)
/* The magic is stored byte-swapped relative to tg3_nvram_read's output. */
7958 if (swab32(val) != TG3_EEPROM_MAGIC)
7962 * Size the chip by reading offsets at increasing powers of two.
7963 * When we encounter our validation signature, we know the addressing
7964 * has wrapped around, and thus have our chip size.
7968 while (cursize < tp->nvram_size) {
7969 if (tg3_nvram_read(tp, cursize, &val) != 0)
7972 if (swab32(val) == TG3_EEPROM_MAGIC)
7978 tp->nvram_size = cursize;
/* Determine NVRAM size from the size word at offset 0xf0: the upper 16
 * bits encode the size in KB.  Defaults to 0x20000 (128 KB) when the word
 * cannot be read (or, presumably, is zero — the guard appears stripped). */
7981 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7985 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7987 tp->nvram_size = (val >> 16) * 1024;
7991 tp->nvram_size = 0x20000;
/* Decode NVRAM_CFG1 to identify the attached NVRAM part: sets the JEDEC
 * vendor id, page size and the NVRAM_BUFFERED/FLASH flags.  Only 5750-class
 * chips get the full vendor decode; everything else defaults to a buffered
 * Atmel AT45DB0X1B part.
 * NOTE(review): `break;` statements, `switch` default label and closing
 * braces appear stripped by extraction — verify against upstream tg3.c. */
7994 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7998 nvcfg1 = tr32(NVRAM_CFG1);
7999 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8000 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* Not flash: force compat-bypass off so EEPROM accesses work. */
8003 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8004 tw32(NVRAM_CFG1, nvcfg1);
8007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8008 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8009 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8010 tp->nvram_jedecnum = JEDEC_ATMEL;
8011 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8012 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8014 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8015 tp->nvram_jedecnum = JEDEC_ATMEL;
8016 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8018 case FLASH_VENDOR_ATMEL_EEPROM:
8019 tp->nvram_jedecnum = JEDEC_ATMEL;
8020 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8021 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8023 case FLASH_VENDOR_ST:
8024 tp->nvram_jedecnum = JEDEC_ST;
8025 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8026 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8028 case FLASH_VENDOR_SAIFUN:
8029 tp->nvram_jedecnum = JEDEC_SAIFUN;
8030 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8032 case FLASH_VENDOR_SST_SMALL:
8033 case FLASH_VENDOR_SST_LARGE:
8034 tp->nvram_jedecnum = JEDEC_SST;
8035 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Non-5750 chips: assume a buffered Atmel dataflash. */
8040 tp->nvram_jedecnum = JEDEC_ATMEL;
8041 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8042 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* 5752-specific NVRAM identification: decode the 5752 vendor field in
 * NVRAM_CFG1, note TPM write-protection, and for flash parts pick the
 * page size from the dedicated page-size field.  EEPROM parts use the
 * maximum chip size as their "page" size.
 * NOTE(review): `break;` statements and closing braces appear stripped
 * by extraction — verify against upstream tg3.c. */
8046 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8050 nvcfg1 = tr32(NVRAM_CFG1);
8052 /* NVRAM protection for TPM */
8053 if (nvcfg1 & (1 << 27))
8054 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8056 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8057 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8058 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8059 tp->nvram_jedecnum = JEDEC_ATMEL;
8060 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8062 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8063 tp->nvram_jedecnum = JEDEC_ATMEL;
8064 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8065 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8067 case FLASH_5752VENDOR_ST_M45PE10:
8068 case FLASH_5752VENDOR_ST_M45PE20:
8069 case FLASH_5752VENDOR_ST_M45PE40:
8070 tp->nvram_jedecnum = JEDEC_ST;
8071 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8072 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* Flash parts: page size comes from the 5752 page-size field. */
8076 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8077 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8078 case FLASH_5752PAGE_SIZE_256:
8079 tp->nvram_pagesize = 256;
8081 case FLASH_5752PAGE_SIZE_512:
8082 tp->nvram_pagesize = 512;
8084 case FLASH_5752PAGE_SIZE_1K:
8085 tp->nvram_pagesize = 1024;
8087 case FLASH_5752PAGE_SIZE_2K:
8088 tp->nvram_pagesize = 2048;
8090 case FLASH_5752PAGE_SIZE_4K:
8091 tp->nvram_pagesize = 4096;
8093 case FLASH_5752PAGE_SIZE_264:
8094 tp->nvram_pagesize = 264;
8099 /* For eeprom, set pagesize to maximum eeprom size */
8100 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8102 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8103 tw32(NVRAM_CFG1, nvcfg1);
8107 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* One-time NVRAM/EEPROM probe at device init: reset the EEPROM state
 * machine, enable serial-EEPROM access, then — on chips with a real NVRAM
 * interface (not 5700/5701) — identify the part and its size.  Sun 570X
 * boards are skipped entirely (their NVRAM is not usable; presumably an
 * early return follows the check below — line appears stripped). */
8108 static void __devinit tg3_nvram_init(struct tg3 *tp)
8112 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
/* Reset the EEPROM access FSM and set the default clock period. */
8115 tw32_f(GRC_EEPROM_ADDR,
8116 (EEPROM_ADDR_FSM_RESET |
8117 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8118 EEPROM_ADDR_CLKPERD_SHIFT)));
8120 /* XXX schedule_timeout() ... */
8121 for (j = 0; j < 100; j++)
8124 /* Enable seeprom accesses. */
8125 tw32_f(GRC_LOCAL_CTRL,
8126 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8129 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8130 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8131 tp->tg3_flags |= TG3_FLAG_NVRAM;
8133 tg3_enable_nvram_access(tp);
/* 5752 has its own vendor decode; everything else uses the generic one. */
8135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8136 tg3_get_5752_nvram_info(tp);
8138 tg3_get_nvram_info(tp);
8140 tg3_get_nvram_size(tp);
8142 tg3_disable_nvram_access(tp);
/* 5700/5701: no NVRAM interface, size the serial EEPROM instead. */
8145 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8147 tg3_get_eeprom_size(tp);
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine: program the address with READ|START, busy-poll (up to
 * 10000 iterations) for COMPLETE, then fetch GRC_EEPROM_DATA.
 * Returns 0 on success; error returns appear stripped by extraction. */
8151 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8152 u32 offset, u32 *val)
/* Reject offsets the address field cannot encode. */
8157 if (offset > EEPROM_ADDR_ADDR_MASK ||
/* Preserve unrelated bits of the register while replacing the
 * address/device-id/start fields. */
8161 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8162 EEPROM_ADDR_DEVID_MASK |
8164 tw32(GRC_EEPROM_ADDR,
8166 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8167 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8168 EEPROM_ADDR_ADDR_MASK) |
8169 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8171 for (i = 0; i < 10000; i++) {
8172 tmp = tr32(GRC_EEPROM_ADDR);
8174 if (tmp & EEPROM_ADDR_COMPLETE)
/* Timed out waiting for the FSM. */
8178 if (!(tmp & EEPROM_ADDR_COMPLETE))
8181 *val = tr32(GRC_EEPROM_DATA);
/* Maximum busy-poll iterations for an NVRAM command to complete. */
8185 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and busy-poll for NVRAM_CMD_DONE.  Returns
 * nonzero on timeout (return statements appear stripped by extraction). */
8187 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8191 tw32(NVRAM_CMD, nvram_cmd);
8192 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8194 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8199 if (i == NVRAM_CMD_TIMEOUT) {
/* Read one 32-bit word from NVRAM at `offset`.  Dispatches to the EEPROM
 * path when no NVRAM interface is present, translates linear offsets to
 * the Atmel AT45DB0X1B buffered-flash page addressing, and byte-swaps the
 * raw read data.  Sun 570X boards cannot do NVRAM reads at all. */
8205 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8209 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8210 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8214 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8215 return tg3_nvram_read_using_eeprom(tp, offset, val);
/* Atmel buffered dataflash: the page index lives at a fixed bit
 * position, with the byte offset within the page below it. */
8217 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8218 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8219 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8221 offset = ((offset / tp->nvram_pagesize) <<
8222 ATMEL_AT45DB0X1B_PAGE_POS) +
8223 (offset % tp->nvram_pagesize);
8226 if (offset > NVRAM_ADDR_MSK)
8231 tg3_enable_nvram_access(tp);
8233 tw32(NVRAM_ADDR, offset);
8234 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8235 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
/* NVRAM returns data in the opposite byte order from the host view. */
8238 *val = swab32(tr32(NVRAM_RDDATA));
8240 tg3_nvram_unlock(tp);
8242 tg3_disable_nvram_access(tp);
/* Write `len` bytes (dword-aligned) to the serial EEPROM one 32-bit word
 * at a time via GRC_EEPROM_DATA/GRC_EEPROM_ADDR, busy-polling each write
 * for COMPLETE.  NOTE(review): the `addr` computation, WRITE/START flags
 * and error returns appear stripped by extraction — verify upstream. */
8247 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8248 u32 offset, u32 len, u8 *buf)
8253 for (i = 0; i < len; i += 4) {
8258 memcpy(&data, buf + i, 4);
8260 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8262 val = tr32(GRC_EEPROM_ADDR);
/* Clear any stale completion status before starting the write. */
8263 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8265 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8267 tw32(GRC_EEPROM_ADDR, val |
8268 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8269 (addr & EEPROM_ADDR_ADDR_MASK) |
8273 for (j = 0; j < 10000; j++) {
8274 val = tr32(GRC_EEPROM_ADDR);
8276 if (val & EEPROM_ADDR_COMPLETE)
8280 if (!(val & EEPROM_ADDR_COMPLETE)) {
8289 /* offset and length are dword aligned */
/* Write to unbuffered flash with a read-modify-write per erase page:
 * read the whole page into a scratch buffer, merge the caller's data,
 * issue WREN + page ERASE + WREN, then rewrite every dword of the page
 * with FIRST/LAST framing, finishing with a WRDI (write-disable).
 * NOTE(review): loop structure, `size` computation and cleanup/return
 * lines appear stripped by extraction. */
8290 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8294 u32 pagesize = tp->nvram_pagesize;
8295 u32 pagemask = pagesize - 1;
/* Scratch buffer holding one full flash page. */
8299 tmp = kmalloc(pagesize, GFP_KERNEL);
8305 u32 phy_addr, page_off, size;
8307 phy_addr = offset & ~pagemask;
8309 for (j = 0; j < pagesize; j += 4) {
8310 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8311 (u32 *) (tmp + j))))
8317 page_off = offset & pagemask;
/* Merge the caller's data over the freshly-read page contents. */
8324 memcpy(tmp + page_off, buf, size);
8326 offset = offset + (pagesize - page_off);
8328 tg3_enable_nvram_access(tp);
8331 * Before we can erase the flash page, we need
8332 * to issue a special "write enable" command.
8334 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8336 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8339 /* Erase the target page */
8340 tw32(NVRAM_ADDR, phy_addr);
8342 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8343 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8345 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8348 /* Issue another write enable to start the write. */
8349 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8351 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8354 for (j = 0; j < pagesize; j += 4) {
8357 data = *((u32 *) (tmp + j));
/* NVRAM expects big-endian data words. */
8358 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8360 tw32(NVRAM_ADDR, phy_addr + j);
8362 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
/* First dword of the page opens the burst; last dword closes it. */
8366 nvram_cmd |= NVRAM_CMD_FIRST;
8367 else if (j == (pagesize - 4))
8368 nvram_cmd |= NVRAM_CMD_LAST;
8370 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Re-assert write protection when finished. */
8377 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8378 tg3_nvram_exec_cmd(tp, nvram_cmd);
8385 /* offset and length are dword aligned */
/* Write to buffered flash / EEPROM one dword at a time.  FIRST/LAST
 * command flags are set on page boundaries (Atmel buffered parts use the
 * translated AT45DB page addressing); ST parts need a WREN before the
 * first write of each page, and plain EEPROMs always do complete
 * FIRST|LAST word writes.  NOTE(review): some else-branches and the
 * return appear stripped by extraction. */
8386 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8391 for (i = 0; i < len; i += 4, offset += 4) {
8392 u32 data, page_off, phy_addr, nvram_cmd;
8394 memcpy(&data, buf + i, 4);
8395 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8397 page_off = offset % tp->nvram_pagesize;
/* Atmel buffered dataflash: translate linear offset to page addressing
 * (same scheme as tg3_nvram_read). */
8399 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8400 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8402 phy_addr = ((offset / tp->nvram_pagesize) <<
8403 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8409 tw32(NVRAM_ADDR, phy_addr);
8411 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8413 if ((page_off == 0) || (i == 0))
8414 nvram_cmd |= NVRAM_CMD_FIRST;
8415 else if (page_off == (tp->nvram_pagesize - 4))
8416 nvram_cmd |= NVRAM_CMD_LAST;
8419 nvram_cmd |= NVRAM_CMD_LAST;
/* ST flash requires an explicit write-enable at the start of each
 * page burst. */
8421 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8422 (nvram_cmd & NVRAM_CMD_FIRST)) {
8424 if ((ret = tg3_nvram_exec_cmd(tp,
8425 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8430 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8431 /* We always do complete word writes to eeprom. */
8432 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8435 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8441 /* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily drops the GPIO-based
 * eeprom write-protect, picks the eeprom / buffered / unbuffered write
 * path, frames the operation with GRC_MODE_NVRAM_WR_ENABLE, and restores
 * write-protect afterwards.  Sun 570X boards cannot write NVRAM. */
8442 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8446 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8447 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
/* Drive GPIO1 low to release the eeprom write-protect line. */
8451 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8452 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8453 ~GRC_LCLCTRL_GPIO_OUTPUT1);
8457 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8458 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8465 tg3_enable_nvram_access(tp);
/* 5750+ parts without TPM-protected NVRAM need this magic write-enable
 * poke (value from the Broadcom reference driver). */
8466 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8467 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8468 tw32(NVRAM_WRITE1, 0x406);
8470 grc_mode = tr32(GRC_MODE);
8471 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8473 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8474 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8476 ret = tg3_nvram_write_block_buffered(tp, offset, len,
8480 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8484 grc_mode = tr32(GRC_MODE);
8485 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8487 tg3_disable_nvram_access(tp);
8488 tg3_nvram_unlock(tp);
/* Restore the original GPIO state, re-asserting write protection. */
8491 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8492 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Fallback table mapping PCI subsystem vendor/device IDs of known boards
 * to their PHY IDs, used when the eeprom carries no usable PHY signature
 * (see tg3_phy_probe / lookup_by_subsys).  A phy_id of 0 marks serdes
 * (fiber) boards with no copper PHY. */
8499 struct subsys_tbl_ent {
8500 u16 subsys_vendor, subsys_devid;
8504 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8505 /* Broadcom boards. */
8506 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8507 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8508 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8509 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
8510 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8511 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8512 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
8513 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8514 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8515 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8516 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8519 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8520 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8521 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
8522 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8523 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8526 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8527 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8528 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8529 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8531 /* Compaq boards. */
8532 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8533 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8534 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
8535 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8536 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8539 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Linear search of subsys_id_to_phy_id for this device's PCI subsystem
 * vendor/device pair; returns the matching entry (the no-match `return
 * NULL;` appears stripped by extraction). */
8542 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8546 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8547 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8548 tp->pdev->subsystem_vendor) &&
8549 (subsys_id_to_phy_id[i].subsys_devid ==
8550 tp->pdev->subsystem_device))
8551 return &subsys_id_to_phy_id[i];
8556 /* Since this function may be called in D3-hot power state during
8557 * tg3_init_one(), only config cycles are allowed.
/* Parse the hardware configuration that bootcode left in NIC SRAM
 * (signature-validated): PHY id / serdes flag, LED mode, eeprom
 * write-protect, ASF enable, WOL capability and serdes tuning flags.
 * NOTE(review): `break;` statements, switch default labels and closing
 * braces appear stripped by extraction — verify against upstream tg3.c. */
8559 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8563 /* Make sure register accesses (indirect or otherwise)
8564 * will function correctly.
8566 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8567 tp->misc_host_ctrl);
/* Defaults used when no valid SRAM signature is found. */
8569 tp->phy_id = PHY_ID_INVALID;
8570 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8572 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8573 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8574 u32 nic_cfg, led_cfg;
8575 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8576 int eeprom_phy_serdes = 0;
8578 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8579 tp->nic_sram_data_cfg = nic_cfg;
/* CFG_2 only exists on newer chips with a sane version word. */
8581 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8582 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8583 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8584 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8585 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8586 (ver > 0) && (ver < 0x100))
8587 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8589 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8590 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8591 eeprom_phy_serdes = 1;
/* Repack the SRAM-format PHY id into the driver's internal layout. */
8593 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8594 if (nic_phy_id != 0) {
8595 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8596 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8598 eeprom_phy_id = (id1 >> 16) << 10;
8599 eeprom_phy_id |= (id2 & 0xfc00) << 16;
8600 eeprom_phy_id |= (id2 & 0x03ff) << 0;
8604 tp->phy_id = eeprom_phy_id;
8605 if (eeprom_phy_serdes)
8606 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* 5750+ (Shasta) parts carry LED mode in CFG_2 with extra bits. */
8608 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8609 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8610 SHASTA_EXT_LED_MODE_MASK);
8612 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8616 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8617 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8620 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8621 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8624 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8625 tp->led_ctrl = LED_CTRL_MODE_MAC;
8627 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8628 * read on some older 5700/5701 bootcode.
8630 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8632 GET_ASIC_REV(tp->pci_chip_rev_id) ==
8634 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8638 case SHASTA_EXT_LED_SHARED:
8639 tp->led_ctrl = LED_CTRL_MODE_SHARED;
/* Early 5750 A0/A1 steppings need the extra PHY mode bits. */
8640 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8641 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8642 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8643 LED_CTRL_MODE_PHY_2);
8646 case SHASTA_EXT_LED_MAC:
8647 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8650 case SHASTA_EXT_LED_COMBO:
8651 tp->led_ctrl = LED_CTRL_MODE_COMBO;
8652 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8653 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8654 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards always use PHY_2 LED mode. */
8659 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8661 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8662 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8664 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8665 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8666 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8667 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8669 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8670 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8671 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8672 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8674 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8675 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8677 if (cfg2 & (1 << 17))
8678 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8680 /* serdes signal pre-emphasis in register 0x590 set by */
8681 /* bootcode if bit 18 is set */
8682 if (cfg2 & (1 << 18))
8683 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
/* Probe and initialize the PHY at driver attach: read the id over MII
 * (skipped when ASF firmware owns the PHY), fall back to the eeprom value
 * or the hardcoded subsystem table, then — for copper PHYs — reset and
 * program default autonegotiation advertising.  BCM5401 PHYs also get a
 * DSP fixup.  Serdes boards get a fixed 1000baseT advertising mask.
 * NOTE(review): several braces/labels (e.g. `skip_phy_reset:` target
 * placement) appear stripped by extraction — verify against upstream. */
8687 static int __devinit tg3_phy_probe(struct tg3 *tp)
8689 u32 hw_phy_id_1, hw_phy_id_2;
8690 u32 hw_phy_id, hw_phy_id_masked;
8693 /* Reading the PHY ID register can conflict with ASF
8694 * firwmare access to the PHY hardware.
8697 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8698 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8700 /* Now read the physical PHY_ID from the chip and verify
8701 * that it is sane. If it doesn't look good, we fall back
8702 * to either the hard-coded table based PHY_ID and failing
8703 * that the value found in the eeprom area.
8705 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8706 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Repack MII PHYSID1/PHYSID2 into the driver's internal id layout
 * (same packing as the eeprom path in tg3_get_eeprom_hw_cfg). */
8708 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
8709 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8710 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
8712 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8715 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8716 tp->phy_id = hw_phy_id;
8717 if (hw_phy_id_masked == PHY_ID_BCM8002)
8718 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8720 if (tp->phy_id != PHY_ID_INVALID) {
8721 /* Do nothing, phy ID already set up in
8722 * tg3_get_eeprom_hw_cfg().
8725 struct subsys_tbl_ent *p;
8727 /* No eeprom signature? Try the hardcoded
8728 * subsys device table.
8730 p = lookup_by_subsys(tp);
8734 tp->phy_id = p->phy_id;
8736 tp->phy_id == PHY_ID_BCM8002)
8737 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY with no ASF: reset and set default advertising. */
8741 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8742 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8743 u32 bmsr, adv_reg, tg3_ctrl;
/* BMSR is latched; read twice so the second read reflects the
 * current link state. */
8745 tg3_readphy(tp, MII_BMSR, &bmsr);
8746 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8747 (bmsr & BMSR_LSTATUS))
8748 goto skip_phy_reset;
8750 err = tg3_phy_reset(tp);
8754 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8755 ADVERTISE_100HALF | ADVERTISE_100FULL |
8756 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8758 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8759 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8760 MII_TG3_CTRL_ADV_1000_FULL);
/* 5701 A0/B0 errata: force master mode for gigabit. */
8761 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8762 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8763 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8764 MII_TG3_CTRL_ENABLE_AS_MASTER);
8767 if (!tg3_copper_is_advertising_all(tp)) {
8768 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8770 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8771 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8773 tg3_writephy(tp, MII_BMCR,
8774 BMCR_ANENABLE | BMCR_ANRESTART);
8776 tg3_phy_set_wirespeed(tp);
8778 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8779 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8780 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8784 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8785 err = tg3_init_5401phy_dsp(tp);
8790 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8791 err = tg3_init_5401phy_dsp(tp);
/* Serdes (fiber) links only support 1000baseT autoneg. */
8794 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8795 tp->link_config.advertising =
8796 (ADVERTISED_1000baseT_Half |
8797 ADVERTISED_1000baseT_Full |
8798 ADVERTISED_Autoneg |
8800 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8801 tp->link_config.advertising &=
8802 ~(ADVERTISED_1000baseT_Half |
8803 ADVERTISED_1000baseT_Full);
/* Read the board part number from the VPD area of NVRAM (256 bytes at
 * offset 0x100) and parse the VPD tags for the "PN" keyword, copying up
 * to 24 characters into tp->board_part_number.  Sun 570X boards carry no
 * VPD and get a fixed string; parse failure falls back to "none".
 * NOTE(review): several tag-length computations and loop-advance lines
 * appear stripped by extraction — verify against upstream tg3.c. */
8808 static void __devinit tg3_read_partno(struct tg3 *tp)
8810 unsigned char vpd_data[256];
8813 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8814 /* Sun decided not to put the necessary bits in the
8815 * NVRAM of their onboard tg3 parts :(
8817 strcpy(tp->board_part_number, "Sun 570X");
/* Pull the raw 256-byte VPD image out of NVRAM a dword at a time. */
8821 for (i = 0; i < 256; i += 4) {
8824 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8827 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
8828 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
8829 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8830 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8833 /* Now parse and find the part number. */
8834 for (i = 0; i < 256; ) {
8835 unsigned char val = vpd_data[i];
/* 0x82 = identifier-string tag, 0x91 = read-write VPD tag; both carry
 * a 16-bit little-endian length to skip over. */
8838 if (val == 0x82 || val == 0x91) {
8841 (vpd_data[i + 2] << 8)));
8848 block_end = (i + 3 +
8850 (vpd_data[i + 2] << 8)));
8852 while (i < block_end) {
8853 if (vpd_data[i + 0] == 'P' &&
8854 vpd_data[i + 1] == 'N') {
8855 int partno_len = vpd_data[i + 2];
/* Guard against overflowing tp->board_part_number. */
8857 if (partno_len > 24)
8860 memcpy(tp->board_part_number,
8869 /* Part number not found. */
8874 strcpy(tp->board_part_number, "none");
8877 #ifdef CONFIG_SPARC64
/* SPARC64 only: detect Sun onboard 570X parts by reading the OpenPROM
 * "subsystem-vendor-id" property and comparing against Sun's PCI vendor
 * id.  (Return statements appear stripped by extraction.) */
8878 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8880 struct pci_dev *pdev = tp->pdev;
8881 struct pcidev_cookie *pcp = pdev->sysdata;
8884 int node = pcp->prom_node;
8888 err = prom_getproperty(node, "subsystem-vendor-id",
8889 (char *) &venid, sizeof(venid));
/* prom_getproperty returns 0 / -1 when the property is absent. */
8890 if (err == 0 || err == -1)
8892 if (venid == PCI_VENDOR_ID_SUN)
8899 static int __devinit tg3_get_invariants(struct tg3 *tp)
8901 static struct pci_device_id write_reorder_chipsets[] = {
8902 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8903 PCI_DEVICE_ID_INTEL_82801AA_8) },
8904 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8905 PCI_DEVICE_ID_INTEL_82801AB_8) },
8906 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8907 PCI_DEVICE_ID_INTEL_82801BA_11) },
8908 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8909 PCI_DEVICE_ID_INTEL_82801BA_6) },
8910 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8911 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8915 u32 cacheline_sz_reg;
8916 u32 pci_state_reg, grc_misc_cfg;
8921 #ifdef CONFIG_SPARC64
8922 if (tg3_is_sun_570X(tp))
8923 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8926 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8927 * reordering to the mailbox registers done by the host
8928 * controller can cause major troubles. We read back from
8929 * every mailbox register write to force the writes to be
8930 * posted to the chip in order.
8932 if (pci_dev_present(write_reorder_chipsets))
8933 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8935 /* Force memory write invalidate off. If we leave it on,
8936 * then on 5700_BX chips we have to enable a workaround.
8937 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8938 * to match the cacheline size. The Broadcom driver have this
8939 * workaround but turns MWI off all the times so never uses
8940 * it. This seems to suggest that the workaround is insufficient.
8942 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8943 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8944 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8946 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8947 * has the register indirect write enable bit set before
8948 * we try to access any of the MMIO registers. It is also
8949 * critical that the PCI-X hw workaround situation is decided
8950 * before that as well.
8952 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8955 tp->pci_chip_rev_id = (misc_ctrl_reg >>
8956 MISC_HOST_CTRL_CHIPREV_SHIFT);
8958 /* Wrong chip ID in 5752 A0. This code can be removed later
8959 * as A0 is not in production.
8961 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8962 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8964 /* Find msi capability. */
8965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8966 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
8968 /* Initialize misc host control in PCI block. */
8969 tp->misc_host_ctrl |= (misc_ctrl_reg &
8970 MISC_HOST_CTRL_CHIPREV);
8971 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8972 tp->misc_host_ctrl);
8974 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8977 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
8978 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
8979 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
8980 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
8982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8983 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8985 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8987 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8988 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8989 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8991 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8992 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8994 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
8995 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
8996 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
8997 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
8999 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9000 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9003 tp->pci_lat_timer < 64) {
9004 tp->pci_lat_timer = 64;
9006 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9007 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9008 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9009 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9011 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9015 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9018 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9019 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9021 /* If this is a 5700 BX chipset, and we are in PCI-X
9022 * mode, enable register write workaround.
9024 * The workaround is to use indirect register accesses
9025 * for all chip writes not to mailbox registers.
9027 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9031 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9033 /* The chip can have it's power management PCI config
9034 * space registers clobbered due to this bug.
9035 * So explicitly force the chip into D0 here.
9037 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9039 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9040 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9041 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9044 /* Also, force SERR#/PERR# in PCI command. */
9045 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9046 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9047 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9051 /* Back to back register writes can cause problems on this chip,
9052 * the workaround is to read back all reg writes except those to
9053 * mailbox regs. See tg3_write_indirect_reg32().
9055 * PCI Express 5750_A0 rev chips need this workaround too.
9057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9058 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9059 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9060 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9062 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9063 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9064 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9065 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9067 /* Chip-specific fixup from Broadcom driver */
9068 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9069 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9070 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9071 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9074 /* Get eeprom hw config before calling tg3_set_power_state().
9075 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9076 * determined before calling tg3_set_power_state() so that
9077 * we know whether or not to switch out of Vaux power.
9078 * When the flag is set, it means that GPIO1 is used for eeprom
9079 * write protect and also implies that it is a LOM where GPIOs
9080 * are not used to switch power.
9082 tg3_get_eeprom_hw_cfg(tp);
9084 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9085 * GPIO1 driven high will bring 5700's external PHY out of reset.
9086 * It is also used as eeprom write protect on LOMs.
9088 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9089 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9090 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9091 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9092 GRC_LCLCTRL_GPIO_OUTPUT1);
9093 /* Unused GPIO3 must be driven as output on 5752 because there
9094 * are no pull-up resistors on unused GPIO pins.
9096 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9097 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9099 /* Force the chip into D0. */
9100 err = tg3_set_power_state(tp, 0);
9102 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9103 pci_name(tp->pdev));
9107 /* 5700 B0 chips do not support checksumming correctly due
9110 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9111 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9113 /* Pseudo-header checksum is done by hardware logic and not
9114 * the offload processers, so make the chip do the pseudo-
9115 * header checksums on receive. For transmit it is more
9116 * convenient to do the pseudo-header checksum in software
9117 * as Linux does that on transmit for us in all cases.
9119 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9120 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9122 /* Derive initial jumbo mode from MTU assigned in
9123 * ether_setup() via the alloc_etherdev() call
9125 if (tp->dev->mtu > ETH_DATA_LEN &&
9126 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9127 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9129 /* Determine WakeOnLan speed to use. */
9130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9131 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9132 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9133 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9134 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9136 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9139 /* A few boards don't want Ethernet@WireSpeed phy feature */
9140 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9141 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9142 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9143 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
9144 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9146 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9147 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9148 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9149 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9150 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9152 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9153 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9155 tp->coalesce_mode = 0;
9156 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9157 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9158 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9160 /* Initialize MAC MI mode, polling disabled. */
9161 tw32_f(MAC_MI_MODE, tp->mi_mode);
9164 /* Initialize data/descriptor byte/word swapping. */
9165 val = tr32(GRC_MODE);
9166 val &= GRC_MODE_HOST_STACKUP;
9167 tw32(GRC_MODE, val | tp->grc_mode);
9169 tg3_switch_clocks(tp);
9171 /* Clear this out for sanity. */
9172 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9174 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9176 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9177 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9178 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9180 if (chiprevid == CHIPREV_ID_5701_A0 ||
9181 chiprevid == CHIPREV_ID_5701_B0 ||
9182 chiprevid == CHIPREV_ID_5701_B2 ||
9183 chiprevid == CHIPREV_ID_5701_B5) {
9184 void __iomem *sram_base;
9186 /* Write some dummy words into the SRAM status block
9187 * area, see if it reads back correctly. If the return
9188 * value is bad, force enable the PCIX workaround.
9190 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9192 writel(0x00000000, sram_base);
9193 writel(0x00000000, sram_base + 4);
9194 writel(0xffffffff, sram_base + 4);
9195 if (readl(sram_base) != 0x00000000)
9196 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9203 grc_misc_cfg = tr32(GRC_MISC_CFG);
9204 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9206 /* Broadcom's driver says that CIOBE multisplit has a bug */
9208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9209 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9210 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9211 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9215 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9216 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9217 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9219 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9220 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9221 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9222 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9223 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9224 HOSTCC_MODE_CLRTICK_TXBD);
9226 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9227 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9228 tp->misc_host_ctrl);
9231 /* these are limited to 10/100 only */
9232 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9233 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9235 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9236 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9237 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9238 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9239 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9240 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9241 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9242 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9244 err = tg3_phy_probe(tp);
9246 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9247 pci_name(tp->pdev), err);
9248 /* ... but do not return immediately ... */
9251 tg3_read_partno(tp);
9253 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9254 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9257 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9259 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9262 /* 5700 {AX,BX} chips have a broken status block link
9263 * change bit implementation, so we must use the
9264 * status register in those cases.
9266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9267 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9269 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9271 /* The led_ctrl is set during tg3_phy_probe, here we might
9272 * have to force the link status polling mechanism based
9273 * upon subsystem IDs.
9275 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9276 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9277 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9278 TG3_FLAG_USE_LINKCHG_REG);
9281 /* For all SERDES we poll the MAC status register. */
9282 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9283 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9285 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9287 /* 5700 BX chips need to have their TX producer index mailboxes
9288 * written twice to workaround a bug.
9290 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9291 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9293 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9295 /* It seems all chips can get confused if TX buffers
9296 * straddle the 4GB address boundary in some cases.
9298 tp->dev->hard_start_xmit = tg3_start_xmit;
9301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9302 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9305 /* By default, disable wake-on-lan. User can change this
9306 * using ETHTOOL_SWOL.
9308 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9313 #ifdef CONFIG_SPARC64
/* Fetch the MAC address from OpenFirmware on SPARC64 systems: read the
 * 6-byte "local-mac-address" property of this PCI device's PROM node.
 * NOTE(review): the return path and the copy into dev->dev_addr fall in
 * lines dropped from this extract — presumably returns 0 on success so
 * tg3_get_device_address() can skip the NVRAM lookup; confirm in full tree.
 */
9314 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9316 struct net_device *dev = tp->dev;
9317 struct pci_dev *pdev = tp->pdev;
/* pdev->sysdata carries the sparc64 PCI cookie with the PROM node handle */
9318 struct pcidev_cookie *pcp = pdev->sysdata;
9321 int node = pcp->prom_node;
/* Only trust the property if it has exactly the length of an Ethernet MAC */
9323 if (prom_getproplen(node, "local-mac-address") == 6) {
9324 prom_getproperty(node, "local-mac-address",
/* Last-resort SPARC fallback: use the machine-wide MAC address stored in
 * the system IDPROM when neither PROM node nor NVRAM yields an address.
 */
9332 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9334 struct net_device *dev = tp->dev;
9336 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the permanent MAC address for this interface, trying sources
 * in decreasing order of trust:
 *   1. SPARC64 PROM "local-mac-address" property (tg3_get_macaddr_sparc)
 *   2. NIC SRAM MAC-address mailbox written by firmware
 *   3. NVRAM at mac_offset
 *   4. The live MAC_ADDR_0_{HIGH,LOW} registers
 * with an IDPROM fallback on SPARC64 if the result is still invalid.
 */
9341 static int __devinit tg3_get_device_address(struct tg3 *tp)
9343 struct net_device *dev = tp->dev;
9344 u32 hi, lo, mac_offset;
9346 #ifdef CONFIG_SPARC64
9347 if (!tg3_get_macaddr_sparc(tp))
/* 5704/5780 are dual-MAC parts: the second function's address lives at a
 * different NVRAM offset, selected via DUAL_MAC_CTRL_ID.
 * NOTE(review): TG3_FLG2_SUN_570X is tested against tg3_flags here rather
 * than tg3_flags2 — looks like a flag-word mismatch; verify in full tree.
 */
9352 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9353 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9354 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9355 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9357 if (tg3_nvram_lock(tp))
9358 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET)
9360 tg3_nvram_unlock(tp);
9363 /* First try to get it from MAC address mailbox. */
9364 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b == "HK" signature: firmware marks a valid mailbox address this way */
9365 if ((hi >> 16) == 0x484b) {
9366 dev->dev_addr[0] = (hi >> 8) & 0xff;
9367 dev->dev_addr[1] = (hi >> 0) & 0xff;
9369 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9370 dev->dev_addr[2] = (lo >> 24) & 0xff;
9371 dev->dev_addr[3] = (lo >> 16) & 0xff;
9372 dev->dev_addr[4] = (lo >> 8) & 0xff;
9373 dev->dev_addr[5] = (lo >> 0) & 0xff;
9375 /* Next, try NVRAM. */
9376 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9377 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9378 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
/* Note the byte order differs from the mailbox case above */
9379 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9380 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9381 dev->dev_addr[2] = ((lo >> 0) & 0xff);
9382 dev->dev_addr[3] = ((lo >> 8) & 0xff);
9383 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9384 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9386 /* Finally just fetch it out of the MAC control regs. */
9388 hi = tr32(MAC_ADDR_0_HIGH);
9389 lo = tr32(MAC_ADDR_0_LOW);
9391 dev->dev_addr[5] = lo & 0xff;
9392 dev->dev_addr[4] = (lo >> 8) & 0xff;
9393 dev->dev_addr[3] = (lo >> 16) & 0xff;
9394 dev->dev_addr[2] = (lo >> 24) & 0xff;
9395 dev->dev_addr[1] = hi & 0xff;
9396 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Reject multicast/zero addresses; on SPARC64 fall back to the IDPROM */
9399 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9400 #ifdef CONFIG_SPARC64
9401 if (!tg3_get_default_macaddr_sparc(tp))
9409 #define BOUNDARY_SINGLE_CACHELINE 1
9410 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write boundary bits to OR into the DMA_RW_CTRL
 * value 'val', based on the PCI cache line size and the host architecture.
 * 'goal' selects whether bursts should stop at one cache line
 * (BOUNDARY_SINGLE_CACHELINE) or a multiple (BOUNDARY_MULTI_CACHELINE);
 * some RISC PCI host bridges disconnect on bursts that cross a cache line,
 * which wastes bus bandwidth.  Returns the adjusted val.
 */
9412 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9418 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero cache-line-size register means "unknown": assume a large 1024 so
 * the switch below falls through to the no-restriction default */
9420 cacheline_size = 1024;
9422 cacheline_size = (int) byte * 4;
9424 /* On 5703 and later chips, the boundary bits have no
9427 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9428 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9429 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Architectures whose PCI bridges are known to need boundary limiting */
9432 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9433 goal = BOUNDARY_MULTI_CACHELINE;
9435 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9436 goal = BOUNDARY_SINGLE_CACHELINE;
9445 /* PCI controllers on most RISC systems tend to disconnect
9446 * when a device tries to burst across a cache-line boundary.
9447 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9449 * Unfortunately, for PCI-E there are only limited
9450 * write-side controls for this, and thus for reads
9451 * we will still get the disconnects. We'll also waste
9452 * these PCI cycles for both read and write for chips
9453 * other than 5700 and 5701 which do not implement the
/* PCI-X mode: boundary encodings differ from conventional PCI */
9456 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9457 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9458 switch (cacheline_size) {
9463 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9464 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9465 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9467 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9468 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9473 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9474 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9478 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9479 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only write-side boundary control exists */
9482 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9483 switch (cacheline_size) {
9487 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9488 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9489 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9495 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9496 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: pick the boundary matching the cache line size */
9500 switch (cacheline_size) {
9502 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9503 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9504 DMA_RWCTRL_WRITE_BNDRY_16);
9509 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9510 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9511 DMA_RWCTRL_WRITE_BNDRY_32);
9516 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9517 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9518 DMA_RWCTRL_WRITE_BNDRY_64);
9523 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9524 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9525 DMA_RWCTRL_WRITE_BNDRY_128);
9530 val |= (DMA_RWCTRL_READ_BNDRY_256 |
9531 DMA_RWCTRL_WRITE_BNDRY_256);
9534 val |= (DMA_RWCTRL_READ_BNDRY_512 |
9535 DMA_RWCTRL_WRITE_BNDRY_512);
9539 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9540 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Perform one host<->NIC DMA transfer of 'size' bytes using a hand-built
 * internal buffer descriptor placed in NIC SRAM, then poll the completion
 * FIFOs.  'to_device' selects direction: nonzero = host buffer -> NIC SRAM
 * (read DMA engine), zero = NIC SRAM -> host buffer (write DMA engine).
 * Used by tg3_test_dma() to detect chipset DMA bugs at probe time.
 */
9549 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9551 struct tg3_internal_buffer_desc test_desc;
9555 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Clear completion FIFOs and DMA engine status before starting */
9557 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9558 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9559 tw32(RDMAC_STATUS, 0);
9560 tw32(WDMAC_STATUS, 0);
9562 tw32(BUFMGR_MODE, 0);
/* Descriptor points at the host test buffer and NIC mbuf 0x2100 */
9565 test_desc.addr_hi = ((u64) buf_dma) >> 32;
9566 test_desc.addr_lo = buf_dma & 0xffffffff;
9567 test_desc.nic_mbuf = 0x00002100;
9568 test_desc.len = size;
9571 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
9572 * the *second* time the tg3 driver was getting loaded after an
9575 * Broadcom tells me:
9576 * ...the DMA engine is connected to the GRC block and a DMA
9577 * reset may affect the GRC block in some unpredictable way...
9578 * The behavior of resets to individual blocks has not been tested.
9580 * Broadcom noted the GRC reset will also reset all sub-components.
/* Completion/submission queue ids differ per direction (magic values from
 * the hardware FTQ layout) */
9583 test_desc.cqid_sqid = (13 << 8) | 2;
9585 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9588 test_desc.cqid_sqid = (16 << 8) | 7;
9590 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9593 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM via the PCI memory window */
9595 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9598 val = *(((u32 *)&test_desc) + i);
9599 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9600 sram_dma_descs + (i * sizeof(u32)));
9601 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9603 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the appropriate DMA engine's high-priority FIFO */
9606 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9608 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded to 40 iterations) for the descriptor to appear on the
 * completion FIFO, signalling the transfer finished */
9612 for (i = 0; i < 40; i++) {
9616 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9618 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9619 if ((val & 0xffff) == sram_dma_descs) {
9630 #define TEST_BUFFER_SIZE 0x2000
/* Probe-time DMA self test.  Builds a baseline dma_rwctrl value for this
 * bus type (PCI / PCI-X / PCI Express) and chip, then on 5700/5701 runs a
 * write+read loopback of an 8KB pattern buffer against NIC SRAM at maximum
 * write burst size to provoke the known 5700/5701 write-DMA corruption bug.
 * On failure (or on chipsets known to expose the bug silently) the write
 * boundary is clamped to 16 bytes.  Returns 0 on success.
 */
9632 static int __devinit tg3_test_dma(struct tg3 *tp)
9635 u32 *buf, saved_dma_rwctrl;
9638 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Baseline PCI read/write command codes for DMA_RW_CTRL */
9644 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9645 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9647 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Per-bus-type DMA watermark magic values (from Broadcom reference code) */
9649 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9650 /* DMA read watermark not used on PCIE */
9651 tp->dma_rwctrl |= 0x00180000;
9652 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9655 tp->dma_rwctrl |= 0x003f0000;
9657 tp->dma_rwctrl |= 0x003f000f;
9659 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9661 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
/* At certain core clock settings, split DMA transactions misbehave */
9663 if (ccval == 0x6 || ccval == 0x7)
9664 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9666 /* Set bit 23 to enable PCIX hw bug fix */
9667 tp->dma_rwctrl |= 0x009f0000;
9668 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9669 /* 5780 always in PCIX mode */
9670 tp->dma_rwctrl |= 0x00144000;
9672 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: low nibble is reserved/reassigned, keep it clear */
9676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9678 tp->dma_rwctrl &= 0xfffffff0;
9680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9682 /* Remove this if it causes problems for some boards. */
9683 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9685 /* On 5700/5701 chips, we need to set this bit.
9686 * Otherwise the chip will issue cacheline transactions
9687 * to streamable DMA memory with not all the byte
9688 * enables turned on. This is an error on several
9689 * RISC PCI controllers, in particular sparc64.
9691 * On 5703/5704 chips, this bit has been reassigned
9692 * a different meaning. In particular, it is used
9693 * on those chips to enable a PCI-X workaround.
9695 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9698 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9701 /* Unneeded, already done by tg3_get_invariants. */
9702 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual loopback test; other chips are done */
9706 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9707 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9710 /* It is best to perform DMA test with maximum write burst size
9711 * to expose the 5700/5701 write DMA bug.
9713 saved_dma_rwctrl = tp->dma_rwctrl;
9714 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9715 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the host buffer with a test pattern (pattern fill dropped from this
 * extract) */
9720 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9723 /* Send the buffer to the chip. */
9724 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9726 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
9731 /* validate data reached card RAM correctly. */
9732 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9734 tg3_read_mem(tp, 0x2100 + (i*4), &val);
9735 if (le32_to_cpu(val) != p[i]) {
9736 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
9737 /* ret = -ENODEV here? */
9742 /* Now read it back. */
9743 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
9745 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
/* Verify the round trip; on a mismatch, retry once with the write
 * boundary clamped to 16 bytes before declaring failure */
9751 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9755 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9756 DMA_RWCTRL_WRITE_BNDRY_16) {
9757 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9758 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9759 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9762 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
9768 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
9774 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9775 DMA_RWCTRL_WRITE_BNDRY_16) {
/* Chipsets known to have the DMA disconnect bug even though the test
 * passes — force the conservative 16-byte write boundary on them */
9776 static struct pci_device_id dma_wait_state_chipsets[] = {
9777 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
9778 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
9782 /* DMA test passed without adjusting DMA boundary,
9783 * now look for chipsets that are known to expose the
9784 * DMA bug without failing the test.
9786 if (pci_dev_present(dma_wait_state_chipsets)) {
9787 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9788 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9791 /* Safe to use the calculated DMA boundary. */
9792 tp->dma_rwctrl = saved_dma_rwctrl;
9794 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9798 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Set the default link configuration used until userspace overrides it
 * via ethtool: advertise all 10/100/1000 modes with autonegotiation, mark
 * speed/duplex as not-yet-known, and start with the carrier off.
 */
9803 static void __devinit tg3_init_link_config(struct tg3 *tp)
9805 tp->link_config.advertising =
9806 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9807 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9808 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
9809 ADVERTISED_Autoneg | ADVERTISED_MII);
9810 tp->link_config.speed = SPEED_INVALID;
9811 tp->link_config.duplex = DUPLEX_INVALID;
9812 tp->link_config.autoneg = AUTONEG_ENABLE;
9813 netif_carrier_off(tp->dev);
9814 tp->link_config.active_speed = SPEED_INVALID;
9815 tp->link_config.active_duplex = DUPLEX_INVALID;
9816 tp->link_config.phy_is_low_power = 0;
/* orig_* fields preserve user settings across power-state transitions */
9817 tp->link_config.orig_speed = SPEED_INVALID;
9818 tp->link_config.orig_duplex = DUPLEX_INVALID;
9819 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Load the default buffer-manager watermarks (standard and jumbo mbuf
 * pools plus the DMA descriptor pool).  tg3_init_one() later overrides
 * the mbuf values for 5705-class chips.
 */
9822 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
9824 tp->bufmgr_config.mbuf_read_dma_low_water =
9825 DEFAULT_MB_RDMA_LOW_WATER;
9826 tp->bufmgr_config.mbuf_mac_rx_low_water =
9827 DEFAULT_MB_MACRX_LOW_WATER;
9828 tp->bufmgr_config.mbuf_high_water =
9829 DEFAULT_MB_HIGH_WATER;
/* Separate thresholds used while the jumbo ring is enabled */
9831 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
9832 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
9833 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
9834 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
9835 tp->bufmgr_config.mbuf_high_water_jumbo =
9836 DEFAULT_MB_HIGH_WATER_JUMBO;
9838 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
9839 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Return a printable name for the attached PHY (masked phy_id), used in
 * the probe banner printed by tg3_init_one().  A zero id means a SerDes
 * interface with no MII PHY.
 */
9842 static char * __devinit tg3_phy_string(struct tg3 *tp)
9844 switch (tp->phy_id & PHY_ID_MASK) {
9845 case PHY_ID_BCM5400: return "5400";
9846 case PHY_ID_BCM5401: return "5401";
9847 case PHY_ID_BCM5411: return "5411";
9848 case PHY_ID_BCM5701: return "5701";
9849 case PHY_ID_BCM5703: return "5703";
9850 case PHY_ID_BCM5704: return "5704";
9851 case PHY_ID_BCM5705: return "5705";
9852 case PHY_ID_BCM5750: return "5750";
9853 case PHY_ID_BCM5752: return "5752";
9854 case PHY_ID_BCM5780: return "5780";
9855 case PHY_ID_BCM8002: return "8002/serdes";
9856 case 0: return "serdes";
9857 default: return "unknown";
/* 5704 is a dual-port device exposed as two PCI functions in one slot.
 * Scan the other functions of our devfn (device number with function bits
 * masked off) to locate the sibling port's pci_dev.
 */
9861 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9863 struct pci_dev *peer;
/* devfn & ~7 strips the 3 function bits, keeping the device number */
9864 unsigned int func, devnr = tp->pdev->devfn & ~7;
9866 for (func = 0; func < 8; func++) {
9867 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9868 if (peer && peer != tp->pdev)
9872 if (!peer || peer == tp->pdev)
9876 * We don't need to keep the refcount elevated; there's no way
9877 * to remove one half of this device without removing the other
/* Populate tp->coal with the default interrupt-coalescing parameters
 * reported/accepted through ethtool, then adjust for hardware quirks:
 * clear-ticks mode needs different tick values, and 5705+ chips do not
 * support the per-interrupt and statistics coalescing knobs.
 */
9884 static void __devinit tg3_init_coal(struct tg3 *tp)
9886 struct ethtool_coalesce *ec = &tp->coal;
9888 memset(ec, 0, sizeof(*ec));
9889 ec->cmd = ETHTOOL_GCOALESCE;
9890 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9891 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9892 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9893 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9894 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9895 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9896 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9897 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9898 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK mode (set in tg3_get_invariants for tagged-status chips) uses
 * different tick constants */
9900 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9901 HOSTCC_MODE_CLRTICK_TXBD)) {
9902 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9903 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9904 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9905 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9908 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9909 ec->rx_coalesce_usecs_irq = 0;
9910 ec->tx_coalesce_usecs_irq = 0;
9911 ec->stats_block_coalesce_usecs = 0;
/* PCI probe entry point: enable the device, map BAR0, allocate the
 * net_device, configure defaults, run tg3_get_invariants() and the DMA
 * self test, obtain the MAC address, register the netdev, and print the
 * probe banner.  Error paths unwind through the err_out_* labels at the
 * bottom.  Returns 0 on success or a negative errno.
 */
9915 static int __devinit tg3_init_one(struct pci_dev *pdev,
9916 const struct pci_device_id *ent)
/* Print the driver version banner only on the first probed device */
9918 static int tg3_version_printed = 0;
9919 unsigned long tg3reg_base, tg3reg_len;
9920 struct net_device *dev;
9922 int i, err, pci_using_dac, pm_cap;
9924 if (tg3_version_printed++ == 0)
9925 printk(KERN_INFO "%s", version);
9927 err = pci_enable_device(pdev);
9929 printk(KERN_ERR PFX "Cannot enable PCI device, "
9934 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9935 printk(KERN_ERR PFX "Cannot find proper PCI device "
9936 "base address, aborting.\n");
9938 goto err_out_disable_pdev;
9941 err = pci_request_regions(pdev, DRV_MODULE_NAME);
9943 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9945 goto err_out_disable_pdev;
9948 pci_set_master(pdev);
9950 /* Find power-management capability. */
9951 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9953 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9956 goto err_out_free_res;
9959 /* Configure DMA attributes. */
/* Prefer a full 64-bit DMA mask; fall back to 32-bit addressing */
9960 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9963 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9965 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9966 "for consistent allocations\n");
9967 goto err_out_free_res;
9970 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9972 printk(KERN_ERR PFX "No usable DMA configuration, "
9974 goto err_out_free_res;
9979 tg3reg_base = pci_resource_start(pdev, 0);
9980 tg3reg_len = pci_resource_len(pdev, 0);
9982 dev = alloc_etherdev(sizeof(*tp));
9984 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9986 goto err_out_free_res;
9989 SET_MODULE_OWNER(dev);
9990 SET_NETDEV_DEV(dev, &pdev->dev);
9993 dev->features |= NETIF_F_HIGHDMA;
9994 dev->features |= NETIF_F_LLTX;
9995 #if TG3_VLAN_TAG_USED
9996 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9997 dev->vlan_rx_register = tg3_vlan_rx_register;
9998 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10001 tp = netdev_priv(dev);
10004 tp->pm_cap = pm_cap;
10005 tp->mac_mode = TG3_DEF_MAC_MODE;
10006 tp->rx_mode = TG3_DEF_RX_MODE;
10007 tp->tx_mode = TG3_DEF_TX_MODE;
10008 tp->mi_mode = MAC_MI_MODE_BASE;
/* Module parameter tg3_debug overrides the default message-level mask */
10010 tp->msg_enable = tg3_debug;
10012 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10014 /* The word/byte swap controls here control register access byte
10015 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10018 tp->misc_host_ctrl =
10019 MISC_HOST_CTRL_MASK_PCI_INT |
10020 MISC_HOST_CTRL_WORD_SWAP |
10021 MISC_HOST_CTRL_INDIR_ACCESS |
10022 MISC_HOST_CTRL_PCISTATE_RW;
10024 /* The NONFRM (non-frame) byte/word swap controls take effect
10025 * on descriptor entries, anything which isn't packet data.
10027 * The StrongARM chips on the board (one for tx, one for rx)
10028 * are running in big-endian mode.
10030 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10031 GRC_MODE_WSWAP_NONFRM_DATA);
10032 #ifdef __BIG_ENDIAN
10033 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10035 spin_lock_init(&tp->lock);
10036 spin_lock_init(&tp->tx_lock);
10037 spin_lock_init(&tp->indirect_lock);
10038 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10040 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10041 if (tp->regs == 0UL) {
10042 printk(KERN_ERR PFX "Cannot map device registers, "
10045 goto err_out_free_dev;
10048 tg3_init_link_config(tp);
10050 tg3_init_bufmgr_config(tp);
10052 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10053 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10054 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up the net_device operations (pre-net_device_ops era style) */
10056 dev->open = tg3_open;
10057 dev->stop = tg3_close;
10058 dev->get_stats = tg3_get_stats;
10059 dev->set_multicast_list = tg3_set_rx_mode;
10060 dev->set_mac_address = tg3_set_mac_addr;
10061 dev->do_ioctl = tg3_ioctl;
10062 dev->tx_timeout = tg3_tx_timeout;
10063 dev->poll = tg3_poll;
10064 dev->ethtool_ops = &tg3_ethtool_ops;
10066 dev->watchdog_timeo = TG3_TX_TIMEOUT;
10067 dev->change_mtu = tg3_change_mtu;
10068 dev->irq = pdev->irq;
10069 #ifdef CONFIG_NET_POLL_CONTROLLER
10070 dev->poll_controller = tg3_poll_controller;
10073 err = tg3_get_invariants(tp);
10075 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10077 goto err_out_iounmap;
/* 5705-class chips use smaller buffer-manager watermarks */
10080 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10081 tp->bufmgr_config.mbuf_read_dma_low_water =
10082 DEFAULT_MB_RDMA_LOW_WATER_5705;
10083 tp->bufmgr_config.mbuf_mac_rx_low_water =
10084 DEFAULT_MB_MACRX_LOW_WATER_5705;
10085 tp->bufmgr_config.mbuf_high_water =
10086 DEFAULT_MB_HIGH_WATER_5705;
/* Decide TSO capability: HW TSO chips always capable; firmware TSO is
 * disabled on 5700/5701/5705-A0 and when ASF management firmware runs */
10089 #if TG3_TSO_SUPPORT != 0
10090 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10091 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10093 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10095 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10096 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10097 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10099 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10102 /* TSO is off by default, user can enable using ethtool. */
10104 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10105 dev->features |= NETIF_F_TSO;
/* 5705-A1 without TSO on a slow bus must cap the rx ring at 64 entries */
10110 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10111 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10112 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10113 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10114 tp->rx_pending = 63;
10117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10118 tp->pdev_peer = tg3_find_5704_peer(tp);
10120 err = tg3_get_device_address(tp);
10122 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10124 goto err_out_iounmap;
10128 * Reset chip in case UNDI or EFI driver did not shutdown
10129 * DMA self test will enable WDMAC and we'll see (spurious)
10130 * pending DMA on the PCI bus at that point.
10132 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10133 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10134 pci_save_state(tp->pdev);
10135 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10136 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10139 err = tg3_test_dma(tp);
10141 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10142 goto err_out_iounmap;
10145 /* Tigon3 can do ipv4 only... and some chips have buggy
10148 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10149 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10150 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10152 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
/* 5788 cannot DMA to/from high memory */
10154 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10155 dev->features &= ~NETIF_F_HIGHDMA;
10157 /* flow control autonegotiation is default behavior */
10158 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10162 err = register_netdev(dev);
10164 printk(KERN_ERR PFX "Cannot register net device, "
10166 goto err_out_iounmap;
10169 pci_set_drvdata(pdev, dev);
10171 /* Now that we have fully setup the chip, save away a snapshot
10172 * of the PCI config space. We need to restore this after
10173 * GRC_MISC_CFG core clock resets and some resume events.
10175 pci_save_state(tp->pdev);
/* Probe banner: part number, revision, PHY, bus type/speed/width, and
 * the MAC address printed colon-separated below */
10177 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10179 tp->board_part_number,
10180 tp->pci_chip_rev_id,
10181 tg3_phy_string(tp),
10182 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10183 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10184 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10185 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10186 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10187 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10189 for (i = 0; i < 6; i++)
10190 printk("%2.2x%c", dev->dev_addr[i],
10191 i == 5 ? '\n' : ':');
10193 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10194 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10197 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10198 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10199 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10200 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10201 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10202 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10203 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10204 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10205 dev->name, tp->dma_rwctrl);
/* Error unwind labels: release resources in reverse acquisition order */
10216 pci_release_regions(pdev);
10218 err_out_disable_pdev:
10219 pci_disable_device(pdev);
10220 pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: unregister the netdev, then release PCI regions
 * and disable the device, clearing the drvdata pointer.  (iounmap of
 * tp->regs and free_netdev fall in lines dropped from this extract.)
 */
10224 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10226 struct net_device *dev = pci_get_drvdata(pdev);
10229 struct tg3 *tp = netdev_priv(dev);
10231 unregister_netdev(dev);
10234 pci_release_regions(pdev);
10235 pci_disable_device(pdev);
10236 pci_set_drvdata(pdev, NULL);
/* Power-management suspend hook.  If the interface is running: stop the
 * data path and timer, disable interrupts, detach the device, halt the
 * chip, and move it to the requested low-power state.  If the power
 * transition fails, everything is brought back up so the device keeps
 * working.  All chip manipulation is done under tg3_full_lock.
 */
10240 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10242 struct net_device *dev = pci_get_drvdata(pdev);
10243 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never opened */
10246 if (!netif_running(dev))
10249 tg3_netif_stop(tp);
10251 del_timer_sync(&tp->timer);
10253 tg3_full_lock(tp, 1);
10254 tg3_disable_ints(tp);
10255 tg3_full_unlock(tp);
10257 netif_device_detach(dev);
10259 tg3_full_lock(tp, 0);
10260 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10261 tg3_full_unlock(tp);
10263 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Power transition failed: re-init and resume normal operation */
10265 tg3_full_lock(tp, 0);
10269 tp->timer.expires = jiffies + tp->timer_offset;
10270 add_timer(&tp->timer);
10272 netif_device_attach(dev);
10273 tg3_netif_start(tp);
10275 tg3_full_unlock(tp);
/* Power-management resume hook: restore PCI config space, bring the chip
 * back to D0, re-attach the device, restart the driver timer, and restart
 * the data path — the inverse of tg3_suspend().
 */
10281 static int tg3_resume(struct pci_dev *pdev)
10283 struct net_device *dev = pci_get_drvdata(pdev);
10284 struct tg3 *tp = netdev_priv(dev);
10287 if (!netif_running(dev))
10290 pci_restore_state(tp->pdev);
/* Power state 0 == fully-on D0 */
10292 err = tg3_set_power_state(tp, 0);
10296 netif_device_attach(dev);
10298 tg3_full_lock(tp, 0);
10302 tp->timer.expires = jiffies + tp->timer_offset;
10303 add_timer(&tp->timer);
10305 tg3_netif_start(tp);
10307 tg3_full_unlock(tp);
/* PCI driver registration table binding the probe/remove and PM hooks
 * above to the device IDs in tg3_pci_tbl (defined earlier in the file).
 */
10312 static struct pci_driver tg3_driver = {
10313 .name = DRV_MODULE_NAME,
10314 .id_table = tg3_pci_tbl,
10315 .probe = tg3_init_one,
10316 .remove = __devexit_p(tg3_remove_one),
10317 .suspend = tg3_suspend,
10318 .resume = tg3_resume
/* Module load: register the PCI driver (probes all matching devices). */
10321 static int __init tg3_init(void)
10323 return pci_module_init(&tg3_driver);
/* Module unload: unregister the PCI driver, detaching all devices. */
10326 static void __exit tg3_cleanup(void)
10328 pci_unregister_driver(&tg3_driver);
10331 module_init(tg3_init);
10332 module_exit(tg3_cleanup);