2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
44 #include <asm/system.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
/* Compile-time feature flags and driver-wide constants.
 * NOTE(review): this excerpt elides several original lines (e.g. the
 * #else/#endif between the paired #define values below); code left as-is.
 */
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
62 #define TG3_TSO_SUPPORT 1
64 #define TG3_TSO_SUPPORT 0
/* Driver identity strings used in the module banner and ethtool output. */
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.61"
72 #define DRV_MODULE_RELDATE "June 29, 2006"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
/* Byte sizes of the descriptor rings, derived from the entry counts above. */
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
/* Free TX descriptors; the mask works because TG3_TX_RING_SIZE is a power of 2. */
126 #define TX_BUFFS_AVAIL(TP) \
127 ((TP)->tx_pending - \
128 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
132 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134 /* minimum number of free TX descriptors required to wake up TX process */
135 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140 #define TG3_NUM_TEST 6
/* Module banner printed once at load time. */
142 static char version[] __devinitdata =
143 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
/* Debug message bitmap; -1 selects the driver default (TG3_DEF_MSG_ENABLE). */
150 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* PCI IDs this driver binds to: Broadcom Tigon3 variants plus SysKonnect,
 * Altima and Apple rebrands. All entries match any subsystem vendor/device.
 * NOTE(review): the terminating { 0, } sentinel is elided in this excerpt.
 */
154 static struct pci_device_id tg3_pci_tbl[] = {
155 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
230 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
232 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
244 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
246 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
248 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
249 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
250 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
251 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
252 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
253 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
254 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
255 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
256 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
257 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
258 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
259 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
260 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
261 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
262 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
263 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
264 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
/* Exports the ID table for module autoloading via modinfo/udev. */
268 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names for the ETHTOOL_GSTATS counters, in the exact order the stats are
 * laid out in struct tg3_ethtool_stats (count = TG3_NUM_STATS).
 * NOTE(review): the anonymous "static struct { ... }" opener and some
 * entries are elided in this excerpt.
 */
271 const char string[ETH_GSTRING_LEN];
272 } ethtool_stats_keys[TG3_NUM_STATS] = {
275 { "rx_ucast_packets" },
276 { "rx_mcast_packets" },
277 { "rx_bcast_packets" },
279 { "rx_align_errors" },
280 { "rx_xon_pause_rcvd" },
281 { "rx_xoff_pause_rcvd" },
282 { "rx_mac_ctrl_rcvd" },
283 { "rx_xoff_entered" },
284 { "rx_frame_too_long_errors" },
286 { "rx_undersize_packets" },
287 { "rx_in_length_errors" },
288 { "rx_out_length_errors" },
289 { "rx_64_or_less_octet_packets" },
290 { "rx_65_to_127_octet_packets" },
291 { "rx_128_to_255_octet_packets" },
292 { "rx_256_to_511_octet_packets" },
293 { "rx_512_to_1023_octet_packets" },
294 { "rx_1024_to_1522_octet_packets" },
295 { "rx_1523_to_2047_octet_packets" },
296 { "rx_2048_to_4095_octet_packets" },
297 { "rx_4096_to_8191_octet_packets" },
298 { "rx_8192_to_9022_octet_packets" },
305 { "tx_flow_control" },
307 { "tx_single_collisions" },
308 { "tx_mult_collisions" },
310 { "tx_excessive_collisions" },
311 { "tx_late_collisions" },
312 { "tx_collide_2times" },
313 { "tx_collide_3times" },
314 { "tx_collide_4times" },
315 { "tx_collide_5times" },
316 { "tx_collide_6times" },
317 { "tx_collide_7times" },
318 { "tx_collide_8times" },
319 { "tx_collide_9times" },
320 { "tx_collide_10times" },
321 { "tx_collide_11times" },
322 { "tx_collide_12times" },
323 { "tx_collide_13times" },
324 { "tx_collide_14times" },
325 { "tx_collide_15times" },
326 { "tx_ucast_packets" },
327 { "tx_mcast_packets" },
328 { "tx_bcast_packets" },
329 { "tx_carrier_sense_errors" },
333 { "dma_writeq_full" },
334 { "dma_write_prioq_full" },
338 { "rx_threshold_hit" },
340 { "dma_readq_full" },
341 { "dma_read_prioq_full" },
342 { "tx_comp_queue_full" },
344 { "ring_set_send_prod_index" },
345 { "ring_status_update" },
347 { "nic_avoided_irqs" },
348 { "nic_tx_threshold_hit" }
/* Labels for the TG3_NUM_TEST ethtool self-tests; "(online)" tests run
 * without taking the link down, "(offline)" tests disrupt traffic.
 * NOTE(review): the anonymous struct opener is elided in this excerpt.
 */
352 const char string[ETH_GSTRING_LEN];
353 } ethtool_test_keys[TG3_NUM_TEST] = {
354 { "nvram test (online) " },
355 { "link test (online) " },
356 { "register test (offline)" },
357 { "memory test (offline)" },
358 { "loopback test (offline)" },
359 { "interrupt test (offline)" },
/* Plain posted MMIO register write; no read-back flush. */
362 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
364 writel(val, tp->regs + off);
/* Plain MMIO register read. */
367 static u32 tg3_read32(struct tg3 *tp, u32 off)
369 return (readl(tp->regs + off));
/* Register write via PCI config space (REG_BASE_ADDR/REG_DATA window),
 * serialized by indirect_lock since the window is shared global state.
 */
372 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
376 spin_lock_irqsave(&tp->indirect_lock, flags);
377 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
378 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
379 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* MMIO write followed by a read-back to flush the posted write. */
382 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
384 writel(val, tp->regs + off);
385 readl(tp->regs + off);
/* Register read through the PCI config-space window, under indirect_lock.
 * NOTE(review): the "return val;" line is elided in this excerpt.
 */
388 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mailbox write via PCI config space. Two mailboxes have dedicated
 * config-space aliases (RX return consumer, RX std producer); all others
 * go through the shared indirect window at offset + 0x5600.
 */
400 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
404 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
405 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
406 TG3_64BIT_REG_LOW, val);
409 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
410 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
411 TG3_64BIT_REG_LOW, val);
415 spin_lock_irqsave(&tp->indirect_lock, flags);
416 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
417 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
418 spin_unlock_irqrestore(&tp->indirect_lock, flags);
420 /* In indirect mode when disabling interrupts, we also need
421 * to clear the interrupt bit in the GRC local ctrl register.
423 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
425 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
426 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
/* Mailbox read through the indirect window (offset + 0x5600), locked.
 * NOTE(review): the "return val;" line is elided in this excerpt.
 */
430 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
435 spin_lock_irqsave(&tp->indirect_lock, flags);
436 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
437 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
438 spin_unlock_irqrestore(&tp->indirect_lock, flags);
442 /* usec_wait specifies the wait time in usec when writing to certain registers
443 * where it is unsafe to read back the register without some delay.
444 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
445 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
/* Flushing write helper: uses the non-posted (indirect) path on chips with
 * the PCI-X target or ICH workaround bugs, otherwise a flushed MMIO write.
 * NOTE(review): the udelay branches are elided in this excerpt.
 */
447 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
449 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
450 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
451 /* Non-posted methods */
452 tp->write32(tp, off, val);
455 tg3_write32(tp, off, val);
460 /* Wait again after the read for the posted method to guarantee that
461 * the wait time is met.
/* Mailbox write with conditional read-back flush; the flush is skipped on
 * chips with write-reorder or ICH workaround quirks.
 */
467 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
469 tp->write32_mbox(tp, off, val);
470 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
471 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
472 tp->read32_mbox(tp, off);
/* TX mailbox write with chip-bug workarounds.
 * NOTE(review): the writel/readl bodies of both conditionals are elided
 * in this excerpt; only the guard conditions are visible.
 */
475 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
477 void __iomem *mbox = tp->regs + off;
479 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
481 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Shorthand accessors; all dispatch through per-chip function pointers in
 * struct tg3 so the right direct/indirect method is used. They assume a
 * local variable named "tp" is in scope at the call site.
 */
485 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
486 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
487 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
488 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
489 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
491 #define tw32(reg,val) tp->write32(tp, reg, val)
492 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
493 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
494 #define tr32(reg) tp->read32(tp, reg)
/* Write a word into NIC SRAM through the memory window, via PCI config
 * space when SRAM_USE_CONFIG is set, otherwise via flushed MMIO. The base
 * address register is always restored to zero afterwards.
 */
496 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
500 spin_lock_irqsave(&tp->indirect_lock, flags);
501 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
502 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
503 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
505 /* Always leave this as zero. */
506 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
508 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
509 tw32_f(TG3PCI_MEM_WIN_DATA, val);
511 /* Always leave this as zero. */
512 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word from NIC SRAM through the memory window; mirror image of
 * tg3_write_mem, same locking and base-address restore discipline.
 */
517 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
521 spin_lock_irqsave(&tp->indirect_lock, flags);
522 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
523 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
524 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
526 /* Always leave this as zero. */
527 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
529 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
530 *val = tr32(TG3PCI_MEM_WIN_DATA);
532 /* Always leave this as zero. */
533 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
535 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mask PCI interrupts at the host-control level, then set the interrupt
 * mailbox to 1 to disable chip interrupt generation.
 */
538 static void tg3_disable_ints(struct tg3 *tp)
540 tw32(TG3PCI_MISC_HOST_CTRL,
541 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
542 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
/* Force an interrupt via GRC local control if a status update is pending
 * and the chip is not using tagged status mode.
 */
545 static inline void tg3_cond_int(struct tg3 *tp)
547 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
548 (tp->hw_status->status & SD_STATUS_UPDATED))
549 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Re-enable interrupts: unmask at the host-control level and write the
 * last status tag to the interrupt mailbox. The 1-shot MSI case repeats
 * the mailbox write.
 */
552 static void tg3_enable_ints(struct tg3 *tp)
557 tw32(TG3PCI_MISC_HOST_CTRL,
558 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
559 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
560 (tp->last_tag << 24));
561 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
562 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
563 (tp->last_tag << 24));
/* Check the shared status block for pending work: a link-change event
 * (unless link is polled via register/serdes) or RX/TX ring progress.
 * NOTE(review): the "work_exists = 1" assignments and final return are
 * elided in this excerpt.
 */
567 static inline unsigned int tg3_has_work(struct tg3 *tp)
569 struct tg3_hw_status *sblk = tp->hw_status;
570 unsigned int work_exists = 0;
572 /* check for phy events */
573 if (!(tp->tg3_flags &
574 (TG3_FLAG_USE_LINKCHG_REG |
575 TG3_FLAG_POLL_SERDES))) {
576 if (sblk->status & SD_STATUS_LINK_CHG)
579 /* check for RX/TX work to do */
580 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
581 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
588 * similar to tg3_enable_ints, but it accurately determines whether there
589 * is new work pending and can return without flushing the PIO write
590 * which reenables interrupts
592 static void tg3_restart_ints(struct tg3 *tp)
594 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
598 /* When doing tagged status, this work check is unnecessary.
599 * The last_tag we write above tells the chip which piece of
600 * work we've completed.
602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
604 tw32(HOSTCC_MODE, tp->coalesce_mode |
605 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Quiesce the network interface: refresh trans_start so the watchdog does
 * not fire during the stoppage, then stop NAPI polling and TX.
 */
608 static inline void tg3_netif_stop(struct tg3 *tp)
610 tp->dev->trans_start = jiffies; /* prevent tx timeout */
611 netif_poll_disable(tp->dev);
612 netif_tx_disable(tp->dev);
/* Resume the interface after tg3_netif_stop: wake the TX queue, re-enable
 * polling, and mark the status block updated so pending work is noticed.
 */
615 static inline void tg3_netif_start(struct tg3 *tp)
617 netif_wake_queue(tp->dev);
618 /* NOTE: unconditional netif_wake_queue is only appropriate
619 * so long as all callers are assured to have free tx slots
620 * (such as after tg3_init_hw)
622 netif_poll_enable(tp->dev)
623 tp->hw_status->status |= SD_STATUS_UPDATED;
/* Switch the chip's core clock source, stepping through ALTCLK as required
 * by the hardware. 5780-class chips are exempt. Each write uses a 40us
 * settle delay because CLOCK_CTRL cannot be read back immediately.
 * NOTE(review): some original lines are elided in this excerpt.
 */
627 static void tg3_switch_clocks(struct tg3 *tp)
629 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
632 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
635 orig_clock_ctrl = clock_ctrl;
636 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
637 CLOCK_CTRL_CLKRUN_OENABLE |
639 tp->pci_clock_ctrl = clock_ctrl;
641 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
642 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
643 tw32_wait_f(TG3PCI_CLOCK_CTRL,
644 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
646 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
647 tw32_wait_f(TG3PCI_CLOCK_CTRL,
649 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
651 tw32_wait_f(TG3PCI_CLOCK_CTRL,
652 clock_ctrl | (CLOCK_CTRL_ALTCLK),
655 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Upper bound on MI_COM busy-poll iterations for PHY register access. */
658 #define PHY_BUSY_LOOPS 5000
/* Read a PHY (MII) register via the MAC's MI_COM interface: temporarily
 * disable auto-polling, issue the read, busy-wait for MI_COM_BUSY to
 * clear, extract the data, then restore auto-polling.
 * NOTE(review): several lines (delays, loop decrement, return) are elided
 * in this excerpt.
 */
660 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
666 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
674 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
675 MI_COM_PHY_ADDR_MASK);
676 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
677 MI_COM_REG_ADDR_MASK);
678 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
680 tw32_f(MAC_MI_COM, frame_val);
682 loops = PHY_BUSY_LOOPS;
685 frame_val = tr32(MAC_MI_COM);
687 if ((frame_val & MI_COM_BUSY) == 0) {
689 frame_val = tr32(MAC_MI_COM);
697 *val = frame_val & MI_COM_DATA_MASK;
701 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
702 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write a PHY (MII) register via MI_COM; same auto-poll suspend/restore
 * and busy-wait discipline as tg3_readphy.
 * NOTE(review): several lines are elided in this excerpt.
 */
709 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
721 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
722 MI_COM_PHY_ADDR_MASK);
723 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
724 MI_COM_REG_ADDR_MASK);
725 frame_val |= (val & MI_COM_DATA_MASK);
726 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
728 tw32_f(MAC_MI_COM, frame_val);
730 loops = PHY_BUSY_LOOPS;
733 frame_val = tr32(MAC_MI_COM);
734 if ((frame_val & MI_COM_BUSY) == 0) {
736 frame_val = tr32(MAC_MI_COM);
746 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
747 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable "ethernet@wirespeed" in the PHY aux control shadow register
 * (read-modify-write of bits 15 and 4), unless the chip flags say the
 * feature is unsupported.
 */
754 static void tg3_phy_set_wirespeed(struct tg3 *tp)
758 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
761 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
762 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
763 tg3_writephy(tp, MII_TG3_AUX_CTRL,
764 (val | (1 << 15) | (1 << 4)));
/* Reset the PHY by setting BMCR_RESET and polling until the bit
 * self-clears or the retry budget is exhausted.
 * NOTE(review): the polling loop framing and returns are elided in this
 * excerpt.
 */
767 static int tg3_bmcr_reset(struct tg3 *tp)
772 /* OK, reset it, and poll the BMCR_RESET bit until it
773 * clears or we time out.
775 phy_control = BMCR_RESET;
776 err = tg3_writephy(tp, MII_BMCR, phy_control);
782 err = tg3_readphy(tp, MII_BMCR, &phy_control);
786 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until bit 12 clears, indicating the DSP macro
 * operation has finished.
 * NOTE(review): the loop framing and returns are elided in this excerpt.
 */
798 static int tg3_wait_macro_done(struct tg3 *tp)
805 if (!tg3_readphy(tp, 0x16, &tmp32)) {
806 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 PHY DSP channels, read it
 * back, and request a PHY reset via *resetp on mismatch or macro timeout.
 * NOTE(review): *resetp assignments and loop exits are elided in this
 * excerpt.
 */
816 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
818 static const u32 test_pat[4][6] = {
819 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
820 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
821 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
822 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
826 for (chan = 0; chan < 4; chan++) {
829 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
830 (chan * 0x2000) | 0x0200);
831 tg3_writephy(tp, 0x16, 0x0002);
833 for (i = 0; i < 6; i++)
834 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
837 tg3_writephy(tp, 0x16, 0x0202);
838 if (tg3_wait_macro_done(tp)) {
843 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
844 (chan * 0x2000) | 0x0200);
845 tg3_writephy(tp, 0x16, 0x0082);
846 if (tg3_wait_macro_done(tp)) {
851 tg3_writephy(tp, 0x16, 0x0802);
852 if (tg3_wait_macro_done(tp)) {
857 for (i = 0; i < 6; i += 2) {
860 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
861 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
862 tg3_wait_macro_done(tp)) {
868 if (low != test_pat[chan][i] ||
869 high != test_pat[chan][i+1]) {
870 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
871 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
872 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out all 6 words of each of the 4 PHY DSP channels and wait for the
 * macro to complete; returns nonzero on macro timeout.
 * NOTE(review): the error return and final return are elided in this
 * excerpt.
 */
882 static int tg3_phy_reset_chanpat(struct tg3 *tp)
886 for (chan = 0; chan < 4; chan++) {
889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
890 (chan * 0x2000) | 0x0200);
891 tg3_writephy(tp, 0x16, 0x0002);
892 for (i = 0; i < 6; i++)
893 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
894 tg3_writephy(tp, 0x16, 0x0202);
895 if (tg3_wait_macro_done(tp))
/* Work around DSP bugs on 5703/5704/5705 PHYs: reset the PHY, write and
 * verify DSP test patterns (retrying with a fresh reset on mismatch),
 * clear the channel patterns, then restore the saved 1000BASE-T control
 * and extended-control register state.
 * FIX(review): the "&reg32" argument on the two MII_TG3_EXT_CTRL reads had
 * been mangled by HTML-entity decoding into the mojibake "(R)32"
 * (U+00AE followed by "32"); restored to "&reg32". No other change.
 * NOTE(review): several original lines are elided in this excerpt.
 */
902 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
904 u32 reg32, phy9_orig;
905 int retries, do_phy_reset, err;
911 err = tg3_bmcr_reset(tp);
917 /* Disable transmitter and interrupt. */
918 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
922 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
924 /* Set full-duplex, 1000 mbps. */
925 tg3_writephy(tp, MII_BMCR,
926 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
928 /* Set to master mode. */
929 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
932 tg3_writephy(tp, MII_TG3_CTRL,
933 (MII_TG3_CTRL_AS_MASTER |
934 MII_TG3_CTRL_ENABLE_AS_MASTER));
936 /* Enable SM_DSP_CLOCK and 6dB. */
937 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
939 /* Block the PHY control access. */
940 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
941 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
943 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
948 err = tg3_phy_reset_chanpat(tp);
952 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
953 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
955 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
956 tg3_writephy(tp, 0x16, 0x0000);
958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
960 /* Set Extended packet length bit for jumbo frames */
961 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
964 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
967 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
969 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
971 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Forward declaration; tg3_link_report is defined later in the file. */
978 static void tg3_link_report(struct tg3 *);
980 /* This will reset the tigon3 PHY if there is no valid
981 * link unless the FORCE argument is non-zero.
/* Full PHY reset entry point: snapshots BMSR, drops carrier if running,
 * dispatches to the 5703/4/5 DSP workaround or plain BMCR reset, then
 * applies per-PHY-errata DSP fixups and jumbo-frame settings.
 * NOTE(review): error-return lines and some braces are elided in this
 * excerpt.
 */
983 static int tg3_phy_reset(struct tg3 *tp)
988 err = tg3_readphy(tp, MII_BMSR, &phy_status);
989 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
993 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
994 netif_carrier_off(tp->dev);
998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1001 err = tg3_phy_reset_5703_4_5(tp);
1007 err = tg3_bmcr_reset(tp);
/* Per-errata DSP register fixups keyed off tg3_flags2 quirk bits. */
1012 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1013 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1014 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1015 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1016 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1017 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1018 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1021 tg3_writephy(tp, 0x1c, 0x8d68);
1022 tg3_writephy(tp, 0x1c, 0x8d68);
1024 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1025 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1026 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1027 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1028 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1029 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1030 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1031 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1032 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1034 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1035 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1037 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1038 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1040 /* Set Extended packet length bit (bit 14) on all chips that */
1041 /* support jumbo frames */
1042 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1043 /* Cannot do read-modify-write on 5401 */
1044 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1045 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1048 /* Set bit 14 with read-modify-write to preserve other bits */
1049 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1050 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1051 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1054 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1055 * jumbo frames transmission.
1057 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1060 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1061 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1062 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1065 tg3_phy_set_wirespeed(tp);
/* Configure the GPIO-driven auxiliary power rails, coordinating with the
 * peer port on dual-port 5704/5714 boards (both functions share the aux
 * supply). Behavior depends on whether WOL/ASF requires aux power.
 * NOTE(review): several original lines (returns, braces, a conditional)
 * are elided in this excerpt.
 */
1069 static void tg3_frob_aux_power(struct tg3 *tp)
1071 struct tg3 *tp_peer = tp;
1073 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1076 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1077 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1078 struct net_device *dev_peer;
1080 dev_peer = pci_get_drvdata(tp->pdev_peer);
1081 /* remove_one() may have been run on the peer. */
1085 tp_peer = netdev_priv(dev_peer);
1088 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1089 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1090 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1091 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095 (GRC_LCLCTRL_GPIO_OE0 |
1096 GRC_LCLCTRL_GPIO_OE1 |
1097 GRC_LCLCTRL_GPIO_OE2 |
1098 GRC_LCLCTRL_GPIO_OUTPUT0 |
1099 GRC_LCLCTRL_GPIO_OUTPUT1),
1103 u32 grc_local_ctrl = 0;
1105 if (tp_peer != tp &&
1106 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1109 /* Workaround to prevent overdrawing Amps. */
1110 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1112 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1113 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114 grc_local_ctrl, 100);
1117 /* On 5753 and variants, GPIO2 cannot be used. */
1118 no_gpio2 = tp->nic_sram_data_cfg &
1119 NIC_SRAM_DATA_CFG_NO_GPIO2;
1121 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1122 GRC_LCLCTRL_GPIO_OE1 |
1123 GRC_LCLCTRL_GPIO_OE2 |
1124 GRC_LCLCTRL_GPIO_OUTPUT1 |
1125 GRC_LCLCTRL_GPIO_OUTPUT2;
1127 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1128 GRC_LCLCTRL_GPIO_OUTPUT2);
1130 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131 grc_local_ctrl, 100);
1133 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1135 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1136 grc_local_ctrl, 100);
1139 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1140 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1141 grc_local_ctrl, 100);
/* No WOL/ASF: park GPIO1 via an OE-only pulse sequence (not on 5700/5701,
 * and not if the peer port has already completed init).
 */
1145 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1146 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1147 if (tp_peer != tp &&
1148 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1151 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1152 (GRC_LCLCTRL_GPIO_OE1 |
1153 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1155 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156 GRC_LCLCTRL_GPIO_OE1, 100);
1158 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1159 (GRC_LCLCTRL_GPIO_OE1 |
1160 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Forward declarations for routines defined later in the file. */
1165 static int tg3_setup_phy(struct tg3 *, int);
/* Reset-kind codes passed to the firmware signature helpers. */
1167 #define RESET_KIND_SHUTDOWN 0
1168 #define RESET_KIND_INIT 1
1169 #define RESET_KIND_SUSPEND 2
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
/* Power down the PHY via BMCR_PDOWN, except on chips where doing so is
 * unsafe (5700, 5704, and 5780 with MII serdes).
 */
1176 static void tg3_power_down_phy(struct tg3 *tp)
1178 /* The PHY should not be powered down on some chips because
1181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1183 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1184 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1189 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1192 u16 power_control, power_caps;
1193 int pm = tp->pm_cap;
1195 /* Make sure register accesses (indirect or otherwise)
1196 * will function correctly.
1198 pci_write_config_dword(tp->pdev,
1199 TG3PCI_MISC_HOST_CTRL,
1200 tp->misc_host_ctrl);
1202 pci_read_config_word(tp->pdev,
1205 power_control |= PCI_PM_CTRL_PME_STATUS;
1206 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1210 pci_write_config_word(tp->pdev,
1213 udelay(100); /* Delay after power state change */
1215 /* Switch out of Vaux if it is not a LOM */
1216 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1217 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1234 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1236 tp->dev->name, state);
1240 power_control |= PCI_PM_CTRL_PME_ENABLE;
1242 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1243 tw32(TG3PCI_MISC_HOST_CTRL,
1244 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1246 if (tp->link_config.phy_is_low_power == 0) {
1247 tp->link_config.phy_is_low_power = 1;
1248 tp->link_config.orig_speed = tp->link_config.speed;
1249 tp->link_config.orig_duplex = tp->link_config.duplex;
1250 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1253 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1254 tp->link_config.speed = SPEED_10;
1255 tp->link_config.duplex = DUPLEX_HALF;
1256 tp->link_config.autoneg = AUTONEG_ENABLE;
1257 tg3_setup_phy(tp, 0);
1260 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1264 for (i = 0; i < 200; i++) {
1265 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1266 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1271 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1272 WOL_DRV_STATE_SHUTDOWN |
1273 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1275 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1277 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1280 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1281 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1284 mac_mode = MAC_MODE_PORT_MODE_MII;
1286 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1287 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1288 mac_mode |= MAC_MODE_LINK_POLARITY;
1290 mac_mode = MAC_MODE_PORT_MODE_TBI;
1293 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1294 tw32(MAC_LED_CTRL, tp->led_ctrl);
1296 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1297 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1298 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1300 tw32_f(MAC_MODE, mac_mode);
1303 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1307 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1308 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1312 base_val = tp->pci_clock_ctrl;
1313 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1314 CLOCK_CTRL_TXCLK_DISABLE);
1316 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1317 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1318 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1320 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1321 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1322 u32 newbits1, newbits2;
1324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1326 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1327 CLOCK_CTRL_TXCLK_DISABLE |
1329 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1330 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1331 newbits1 = CLOCK_CTRL_625_CORE;
1332 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1334 newbits1 = CLOCK_CTRL_ALTCLK;
1335 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1338 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1341 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1344 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1349 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1350 CLOCK_CTRL_TXCLK_DISABLE |
1351 CLOCK_CTRL_44MHZ_CORE);
1353 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1356 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1357 tp->pci_clock_ctrl | newbits3, 40);
1361 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1362 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1363 /* Turn off the PHY */
1364 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1365 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1366 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1367 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1368 tg3_power_down_phy(tp);
1372 tg3_frob_aux_power(tp);
1374 /* Workaround for unstable PLL clock */
1375 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1376 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1377 u32 val = tr32(0x7d00);
1379 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1384 err = tg3_nvram_lock(tp);
1385 tg3_halt_cpu(tp, RX_CPU_BASE);
1387 tg3_nvram_unlock(tp);
1391 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393 /* Finally, set the new power state. */
1394 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1395 udelay(100); /* Delay after power state change */
1400 static void tg3_link_report(struct tg3 *tp)
1402 if (!netif_carrier_ok(tp->dev)) {
1403 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407 (tp->link_config.active_speed == SPEED_1000 ?
1409 (tp->link_config.active_speed == SPEED_100 ?
1411 (tp->link_config.active_duplex == DUPLEX_FULL ?
1414 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1417 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1418 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1422 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1424 u32 new_tg3_flags = 0;
1425 u32 old_rx_mode = tp->rx_mode;
1426 u32 old_tx_mode = tp->tx_mode;
1428 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1430 /* Convert 1000BaseX flow control bits to 1000BaseT
1431 * bits before resolving flow control.
1433 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1434 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1435 ADVERTISE_PAUSE_ASYM);
1436 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1438 if (local_adv & ADVERTISE_1000XPAUSE)
1439 local_adv |= ADVERTISE_PAUSE_CAP;
1440 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1441 local_adv |= ADVERTISE_PAUSE_ASYM;
1442 if (remote_adv & LPA_1000XPAUSE)
1443 remote_adv |= LPA_PAUSE_CAP;
1444 if (remote_adv & LPA_1000XPAUSE_ASYM)
1445 remote_adv |= LPA_PAUSE_ASYM;
1448 if (local_adv & ADVERTISE_PAUSE_CAP) {
1449 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450 if (remote_adv & LPA_PAUSE_CAP)
1452 (TG3_FLAG_RX_PAUSE |
1454 else if (remote_adv & LPA_PAUSE_ASYM)
1456 (TG3_FLAG_RX_PAUSE);
1458 if (remote_adv & LPA_PAUSE_CAP)
1460 (TG3_FLAG_RX_PAUSE |
1463 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1464 if ((remote_adv & LPA_PAUSE_CAP) &&
1465 (remote_adv & LPA_PAUSE_ASYM))
1466 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1469 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1470 tp->tg3_flags |= new_tg3_flags;
1472 new_tg3_flags = tp->tg3_flags;
1475 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1476 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1478 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1480 if (old_rx_mode != tp->rx_mode) {
1481 tw32_f(MAC_RX_MODE, tp->rx_mode);
1484 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1485 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1487 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1489 if (old_tx_mode != tp->tx_mode) {
1490 tw32_f(MAC_TX_MODE, tp->tx_mode);
1494 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1497 case MII_TG3_AUX_STAT_10HALF:
1499 *duplex = DUPLEX_HALF;
1502 case MII_TG3_AUX_STAT_10FULL:
1504 *duplex = DUPLEX_FULL;
1507 case MII_TG3_AUX_STAT_100HALF:
1509 *duplex = DUPLEX_HALF;
1512 case MII_TG3_AUX_STAT_100FULL:
1514 *duplex = DUPLEX_FULL;
1517 case MII_TG3_AUX_STAT_1000HALF:
1518 *speed = SPEED_1000;
1519 *duplex = DUPLEX_HALF;
1522 case MII_TG3_AUX_STAT_1000FULL:
1523 *speed = SPEED_1000;
1524 *duplex = DUPLEX_FULL;
1528 *speed = SPEED_INVALID;
1529 *duplex = DUPLEX_INVALID;
1534 static void tg3_phy_copper_begin(struct tg3 *tp)
1539 if (tp->link_config.phy_is_low_power) {
1540 /* Entering low power mode. Disable gigabit and
1541 * 100baseT advertisements.
1543 tg3_writephy(tp, MII_TG3_CTRL, 0);
1545 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1546 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1547 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1548 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1550 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1551 } else if (tp->link_config.speed == SPEED_INVALID) {
1552 tp->link_config.advertising =
1553 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1554 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1555 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1556 ADVERTISED_Autoneg | ADVERTISED_MII);
1558 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1559 tp->link_config.advertising &=
1560 ~(ADVERTISED_1000baseT_Half |
1561 ADVERTISED_1000baseT_Full);
1563 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1564 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1565 new_adv |= ADVERTISE_10HALF;
1566 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1567 new_adv |= ADVERTISE_10FULL;
1568 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1569 new_adv |= ADVERTISE_100HALF;
1570 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1571 new_adv |= ADVERTISE_100FULL;
1572 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574 if (tp->link_config.advertising &
1575 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1577 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1578 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1579 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1580 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1581 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1582 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1583 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1584 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1585 MII_TG3_CTRL_ENABLE_AS_MASTER);
1586 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1588 tg3_writephy(tp, MII_TG3_CTRL, 0);
1591 /* Asking for a specific link mode. */
1592 if (tp->link_config.speed == SPEED_1000) {
1593 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1594 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596 if (tp->link_config.duplex == DUPLEX_FULL)
1597 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1599 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1600 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1601 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1602 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1603 MII_TG3_CTRL_ENABLE_AS_MASTER);
1604 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1606 tg3_writephy(tp, MII_TG3_CTRL, 0);
1608 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1609 if (tp->link_config.speed == SPEED_100) {
1610 if (tp->link_config.duplex == DUPLEX_FULL)
1611 new_adv |= ADVERTISE_100FULL;
1613 new_adv |= ADVERTISE_100HALF;
1615 if (tp->link_config.duplex == DUPLEX_FULL)
1616 new_adv |= ADVERTISE_10FULL;
1618 new_adv |= ADVERTISE_10HALF;
1620 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1624 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1625 tp->link_config.speed != SPEED_INVALID) {
1626 u32 bmcr, orig_bmcr;
1628 tp->link_config.active_speed = tp->link_config.speed;
1629 tp->link_config.active_duplex = tp->link_config.duplex;
1632 switch (tp->link_config.speed) {
1638 bmcr |= BMCR_SPEED100;
1642 bmcr |= TG3_BMCR_SPEED1000;
1646 if (tp->link_config.duplex == DUPLEX_FULL)
1647 bmcr |= BMCR_FULLDPLX;
1649 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1650 (bmcr != orig_bmcr)) {
1651 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1652 for (i = 0; i < 1500; i++) {
1656 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1657 tg3_readphy(tp, MII_BMSR, &tmp))
1659 if (!(tmp & BMSR_LSTATUS)) {
1664 tg3_writephy(tp, MII_BMCR, bmcr);
1668 tg3_writephy(tp, MII_BMCR,
1669 BMCR_ANENABLE | BMCR_ANRESTART);
1673 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1677 /* Turn off tap power management. */
1678 /* Set Extended packet length bit */
1679 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1682 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1685 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1691 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1694 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1701 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703 u32 adv_reg, all_mask;
1705 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1708 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1709 ADVERTISE_100HALF | ADVERTISE_100FULL);
1710 if ((adv_reg & all_mask) != all_mask)
1712 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1715 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1718 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1719 MII_TG3_CTRL_ADV_1000_FULL);
1720 if ((tg3_ctrl & all_mask) != all_mask)
1726 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728 int current_link_up;
1737 (MAC_STATUS_SYNC_CHANGED |
1738 MAC_STATUS_CFG_CHANGED |
1739 MAC_STATUS_MI_COMPLETION |
1740 MAC_STATUS_LNKSTATE_CHANGED));
1743 tp->mi_mode = MAC_MI_MODE_BASE;
1744 tw32_f(MAC_MI_MODE, tp->mi_mode);
1747 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749 /* Some third-party PHYs need to be reset on link going
1752 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1755 netif_carrier_ok(tp->dev)) {
1756 tg3_readphy(tp, MII_BMSR, &bmsr);
1757 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1758 !(bmsr & BMSR_LSTATUS))
1764 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1765 tg3_readphy(tp, MII_BMSR, &bmsr);
1766 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1767 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1770 if (!(bmsr & BMSR_LSTATUS)) {
1771 err = tg3_init_5401phy_dsp(tp);
1775 tg3_readphy(tp, MII_BMSR, &bmsr);
1776 for (i = 0; i < 1000; i++) {
1778 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1779 (bmsr & BMSR_LSTATUS)) {
1785 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1786 !(bmsr & BMSR_LSTATUS) &&
1787 tp->link_config.active_speed == SPEED_1000) {
1788 err = tg3_phy_reset(tp);
1790 err = tg3_init_5401phy_dsp(tp);
1795 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1796 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1797 /* 5701 {A0,B0} CRC bug workaround */
1798 tg3_writephy(tp, 0x15, 0x0a75);
1799 tg3_writephy(tp, 0x1c, 0x8c68);
1800 tg3_writephy(tp, 0x1c, 0x8d68);
1801 tg3_writephy(tp, 0x1c, 0x8c68);
1804 /* Clear pending interrupts... */
1805 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1809 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1815 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1816 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1817 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1819 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1822 current_link_up = 0;
1823 current_speed = SPEED_INVALID;
1824 current_duplex = DUPLEX_INVALID;
1826 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1830 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1831 if (!(val & (1 << 10))) {
1833 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1839 for (i = 0; i < 100; i++) {
1840 tg3_readphy(tp, MII_BMSR, &bmsr);
1841 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1842 (bmsr & BMSR_LSTATUS))
1847 if (bmsr & BMSR_LSTATUS) {
1850 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1851 for (i = 0; i < 2000; i++) {
1853 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1858 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1863 for (i = 0; i < 200; i++) {
1864 tg3_readphy(tp, MII_BMCR, &bmcr);
1865 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867 if (bmcr && bmcr != 0x7fff)
1872 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1873 if (bmcr & BMCR_ANENABLE) {
1874 current_link_up = 1;
1876 /* Force autoneg restart if we are exiting
1879 if (!tg3_copper_is_advertising_all(tp))
1880 current_link_up = 0;
1882 current_link_up = 0;
1885 if (!(bmcr & BMCR_ANENABLE) &&
1886 tp->link_config.speed == current_speed &&
1887 tp->link_config.duplex == current_duplex) {
1888 current_link_up = 1;
1890 current_link_up = 0;
1894 tp->link_config.active_speed = current_speed;
1895 tp->link_config.active_duplex = current_duplex;
1898 if (current_link_up == 1 &&
1899 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1900 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1901 u32 local_adv, remote_adv;
1903 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1910 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912 /* If we are not advertising full pause capability,
1913 * something is wrong. Bring the link down and reconfigure.
1915 if (local_adv != ADVERTISE_PAUSE_CAP) {
1916 current_link_up = 0;
1918 tg3_setup_flow_control(tp, local_adv, remote_adv);
1922 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1925 tg3_phy_copper_begin(tp);
1927 tg3_readphy(tp, MII_BMSR, &tmp);
1928 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1929 (tmp & BMSR_LSTATUS))
1930 current_link_up = 1;
1933 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1934 if (current_link_up == 1) {
1935 if (tp->link_config.active_speed == SPEED_100 ||
1936 tp->link_config.active_speed == SPEED_10)
1937 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1944 if (tp->link_config.active_duplex == DUPLEX_HALF)
1945 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1949 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1950 (current_link_up == 1 &&
1951 tp->link_config.active_speed == SPEED_10))
1952 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954 if (current_link_up == 1)
1955 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1958 /* ??? Without this setting Netgear GA302T PHY does not
1959 * ??? send/receive packets...
1961 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1962 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1963 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1964 tw32_f(MAC_MI_MODE, tp->mi_mode);
1968 tw32_f(MAC_MODE, tp->mac_mode);
1971 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1972 /* Polled via timer. */
1973 tw32_f(MAC_EVENT, 0);
1975 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1980 current_link_up == 1 &&
1981 tp->link_config.active_speed == SPEED_1000 &&
1982 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1983 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1986 (MAC_STATUS_SYNC_CHANGED |
1987 MAC_STATUS_CFG_CHANGED));
1990 NIC_SRAM_FIRMWARE_MBOX,
1991 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1994 if (current_link_up != netif_carrier_ok(tp->dev)) {
1995 if (current_link_up)
1996 netif_carrier_on(tp->dev);
1998 netif_carrier_off(tp->dev);
1999 tg3_link_report(tp);
2005 struct tg3_fiber_aneginfo {
2007 #define ANEG_STATE_UNKNOWN 0
2008 #define ANEG_STATE_AN_ENABLE 1
2009 #define ANEG_STATE_RESTART_INIT 2
2010 #define ANEG_STATE_RESTART 3
2011 #define ANEG_STATE_DISABLE_LINK_OK 4
2012 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2013 #define ANEG_STATE_ABILITY_DETECT 6
2014 #define ANEG_STATE_ACK_DETECT_INIT 7
2015 #define ANEG_STATE_ACK_DETECT 8
2016 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2017 #define ANEG_STATE_COMPLETE_ACK 10
2018 #define ANEG_STATE_IDLE_DETECT_INIT 11
2019 #define ANEG_STATE_IDLE_DETECT 12
2020 #define ANEG_STATE_LINK_OK 13
2021 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2022 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2025 #define MR_AN_ENABLE 0x00000001
2026 #define MR_RESTART_AN 0x00000002
2027 #define MR_AN_COMPLETE 0x00000004
2028 #define MR_PAGE_RX 0x00000008
2029 #define MR_NP_LOADED 0x00000010
2030 #define MR_TOGGLE_TX 0x00000020
2031 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2032 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2033 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2034 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2035 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2036 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2037 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2038 #define MR_TOGGLE_RX 0x00002000
2039 #define MR_NP_RX 0x00004000
2041 #define MR_LINK_OK 0x80000000
2043 unsigned long link_time, cur_time;
2045 u32 ability_match_cfg;
2046 int ability_match_count;
2048 char ability_match, idle_match, ack_match;
2050 u32 txconfig, rxconfig;
2051 #define ANEG_CFG_NP 0x00000080
2052 #define ANEG_CFG_ACK 0x00000040
2053 #define ANEG_CFG_RF2 0x00000020
2054 #define ANEG_CFG_RF1 0x00000010
2055 #define ANEG_CFG_PS2 0x00000001
2056 #define ANEG_CFG_PS1 0x00008000
2057 #define ANEG_CFG_HD 0x00004000
2058 #define ANEG_CFG_FD 0x00002000
2059 #define ANEG_CFG_INVAL 0x00001f06
2064 #define ANEG_TIMER_ENAB 2
2065 #define ANEG_FAILED -1
2067 #define ANEG_STATE_SETTLE_TIME 10000
2069 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2070 struct tg3_fiber_aneginfo *ap)
2072 unsigned long delta;
2076 if (ap->state == ANEG_STATE_UNKNOWN) {
2080 ap->ability_match_cfg = 0;
2081 ap->ability_match_count = 0;
2082 ap->ability_match = 0;
2088 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2089 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2091 if (rx_cfg_reg != ap->ability_match_cfg) {
2092 ap->ability_match_cfg = rx_cfg_reg;
2093 ap->ability_match = 0;
2094 ap->ability_match_count = 0;
2096 if (++ap->ability_match_count > 1) {
2097 ap->ability_match = 1;
2098 ap->ability_match_cfg = rx_cfg_reg;
2101 if (rx_cfg_reg & ANEG_CFG_ACK)
2109 ap->ability_match_cfg = 0;
2110 ap->ability_match_count = 0;
2111 ap->ability_match = 0;
2117 ap->rxconfig = rx_cfg_reg;
2121 case ANEG_STATE_UNKNOWN:
2122 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2123 ap->state = ANEG_STATE_AN_ENABLE;
2126 case ANEG_STATE_AN_ENABLE:
2127 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2128 if (ap->flags & MR_AN_ENABLE) {
2131 ap->ability_match_cfg = 0;
2132 ap->ability_match_count = 0;
2133 ap->ability_match = 0;
2137 ap->state = ANEG_STATE_RESTART_INIT;
2139 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2143 case ANEG_STATE_RESTART_INIT:
2144 ap->link_time = ap->cur_time;
2145 ap->flags &= ~(MR_NP_LOADED);
2147 tw32(MAC_TX_AUTO_NEG, 0);
2148 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2149 tw32_f(MAC_MODE, tp->mac_mode);
2152 ret = ANEG_TIMER_ENAB;
2153 ap->state = ANEG_STATE_RESTART;
2156 case ANEG_STATE_RESTART:
2157 delta = ap->cur_time - ap->link_time;
2158 if (delta > ANEG_STATE_SETTLE_TIME) {
2159 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2161 ret = ANEG_TIMER_ENAB;
2165 case ANEG_STATE_DISABLE_LINK_OK:
2169 case ANEG_STATE_ABILITY_DETECT_INIT:
2170 ap->flags &= ~(MR_TOGGLE_TX);
2171 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2172 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2173 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2174 tw32_f(MAC_MODE, tp->mac_mode);
2177 ap->state = ANEG_STATE_ABILITY_DETECT;
2180 case ANEG_STATE_ABILITY_DETECT:
2181 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2182 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2186 case ANEG_STATE_ACK_DETECT_INIT:
2187 ap->txconfig |= ANEG_CFG_ACK;
2188 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2189 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2190 tw32_f(MAC_MODE, tp->mac_mode);
2193 ap->state = ANEG_STATE_ACK_DETECT;
2196 case ANEG_STATE_ACK_DETECT:
2197 if (ap->ack_match != 0) {
2198 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2199 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2200 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2202 ap->state = ANEG_STATE_AN_ENABLE;
2204 } else if (ap->ability_match != 0 &&
2205 ap->rxconfig == 0) {
2206 ap->state = ANEG_STATE_AN_ENABLE;
2210 case ANEG_STATE_COMPLETE_ACK_INIT:
2211 if (ap->rxconfig & ANEG_CFG_INVAL) {
2215 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2216 MR_LP_ADV_HALF_DUPLEX |
2217 MR_LP_ADV_SYM_PAUSE |
2218 MR_LP_ADV_ASYM_PAUSE |
2219 MR_LP_ADV_REMOTE_FAULT1 |
2220 MR_LP_ADV_REMOTE_FAULT2 |
2221 MR_LP_ADV_NEXT_PAGE |
2224 if (ap->rxconfig & ANEG_CFG_FD)
2225 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2226 if (ap->rxconfig & ANEG_CFG_HD)
2227 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2228 if (ap->rxconfig & ANEG_CFG_PS1)
2229 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2230 if (ap->rxconfig & ANEG_CFG_PS2)
2231 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2232 if (ap->rxconfig & ANEG_CFG_RF1)
2233 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2234 if (ap->rxconfig & ANEG_CFG_RF2)
2235 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2236 if (ap->rxconfig & ANEG_CFG_NP)
2237 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2239 ap->link_time = ap->cur_time;
2241 ap->flags ^= (MR_TOGGLE_TX);
2242 if (ap->rxconfig & 0x0008)
2243 ap->flags |= MR_TOGGLE_RX;
2244 if (ap->rxconfig & ANEG_CFG_NP)
2245 ap->flags |= MR_NP_RX;
2246 ap->flags |= MR_PAGE_RX;
2248 ap->state = ANEG_STATE_COMPLETE_ACK;
2249 ret = ANEG_TIMER_ENAB;
2252 case ANEG_STATE_COMPLETE_ACK:
2253 if (ap->ability_match != 0 &&
2254 ap->rxconfig == 0) {
2255 ap->state = ANEG_STATE_AN_ENABLE;
2258 delta = ap->cur_time - ap->link_time;
2259 if (delta > ANEG_STATE_SETTLE_TIME) {
2260 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2261 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2263 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2264 !(ap->flags & MR_NP_RX)) {
2265 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2273 case ANEG_STATE_IDLE_DETECT_INIT:
2274 ap->link_time = ap->cur_time;
2275 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2276 tw32_f(MAC_MODE, tp->mac_mode);
2279 ap->state = ANEG_STATE_IDLE_DETECT;
2280 ret = ANEG_TIMER_ENAB;
2283 case ANEG_STATE_IDLE_DETECT:
2284 if (ap->ability_match != 0 &&
2285 ap->rxconfig == 0) {
2286 ap->state = ANEG_STATE_AN_ENABLE;
2289 delta = ap->cur_time - ap->link_time;
2290 if (delta > ANEG_STATE_SETTLE_TIME) {
2291 /* XXX another gem from the Broadcom driver :( */
2292 ap->state = ANEG_STATE_LINK_OK;
2296 case ANEG_STATE_LINK_OK:
2297 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2301 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2302 /* ??? unimplemented */
2305 case ANEG_STATE_NEXT_PAGE_WAIT:
2306 /* ??? unimplemented */
2317 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2320 struct tg3_fiber_aneginfo aninfo;
2321 int status = ANEG_FAILED;
2325 tw32_f(MAC_TX_AUTO_NEG, 0);
2327 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2328 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2331 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2334 memset(&aninfo, 0, sizeof(aninfo));
2335 aninfo.flags |= MR_AN_ENABLE;
2336 aninfo.state = ANEG_STATE_UNKNOWN;
2337 aninfo.cur_time = 0;
2339 while (++tick < 195000) {
2340 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2341 if (status == ANEG_DONE || status == ANEG_FAILED)
2347 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2348 tw32_f(MAC_MODE, tp->mac_mode);
2351 *flags = aninfo.flags;
2353 if (status == ANEG_DONE &&
2354 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2355 MR_LP_ADV_FULL_DUPLEX)))
2361 static void tg3_init_bcm8002(struct tg3 *tp)
2363 u32 mac_status = tr32(MAC_STATUS);
2366 /* Reset when initting first time or we have a link. */
2367 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2368 !(mac_status & MAC_STATUS_PCS_SYNCED))
2371 /* Set PLL lock range. */
2372 tg3_writephy(tp, 0x16, 0x8007);
2375 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377 /* Wait for reset to complete. */
2378 /* XXX schedule_timeout() ... */
2379 for (i = 0; i < 500; i++)
2382 /* Config mode; select PMA/Ch 1 regs. */
2383 tg3_writephy(tp, 0x10, 0x8411);
2385 /* Enable auto-lock and comdet, select txclk for tx. */
2386 tg3_writephy(tp, 0x11, 0x0a10);
2388 tg3_writephy(tp, 0x18, 0x00a0);
2389 tg3_writephy(tp, 0x16, 0x41ff);
2391 /* Assert and deassert POR. */
2392 tg3_writephy(tp, 0x13, 0x0400);
2394 tg3_writephy(tp, 0x13, 0x0000);
2396 tg3_writephy(tp, 0x11, 0x0a50);
2398 tg3_writephy(tp, 0x11, 0x0a10);
2400 /* Wait for signal to stabilize */
2401 /* XXX schedule_timeout() ... */
2402 for (i = 0; i < 15000; i++)
2405 /* Deselect the channel register so we can read the PHYID
2408 tg3_writephy(tp, 0x10, 0x8011);
2411 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413 u32 sg_dig_ctrl, sg_dig_status;
2414 u32 serdes_cfg, expected_sg_dig_ctrl;
2415 int workaround, port_a;
2416 int current_link_up;
2419 expected_sg_dig_ctrl = 0;
2422 current_link_up = 0;
2424 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2425 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2430 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2431 /* preserve bits 20-23 for voltage regulator */
2432 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2435 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2438 if (sg_dig_ctrl & (1 << 31)) {
2440 u32 val = serdes_cfg;
2446 tw32_f(MAC_SERDES_CFG, val);
2448 tw32_f(SG_DIG_CTRL, 0x01388400);
2450 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2451 tg3_setup_flow_control(tp, 0, 0);
2452 current_link_up = 1;
2457 /* Want auto-negotiation. */
2458 expected_sg_dig_ctrl = 0x81388400;
2460 /* Pause capability */
2461 expected_sg_dig_ctrl |= (1 << 11);
2463 /* Asymettric pause */
2464 expected_sg_dig_ctrl |= (1 << 12);
2466 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2468 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2469 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2474 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2475 MAC_STATUS_SIGNAL_DET)) {
2478 /* Giver time to negotiate (~200ms) */
2479 for (i = 0; i < 40000; i++) {
2480 sg_dig_status = tr32(SG_DIG_STATUS);
2481 if (sg_dig_status & (0x3))
2485 mac_status = tr32(MAC_STATUS);
2487 if ((sg_dig_status & (1 << 1)) &&
2488 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2489 u32 local_adv, remote_adv;
2491 local_adv = ADVERTISE_PAUSE_CAP;
2493 if (sg_dig_status & (1 << 19))
2494 remote_adv |= LPA_PAUSE_CAP;
2495 if (sg_dig_status & (1 << 20))
2496 remote_adv |= LPA_PAUSE_ASYM;
2498 tg3_setup_flow_control(tp, local_adv, remote_adv);
2499 current_link_up = 1;
2500 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2501 } else if (!(sg_dig_status & (1 << 1))) {
2502 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2503 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2506 u32 val = serdes_cfg;
2513 tw32_f(MAC_SERDES_CFG, val);
2516 tw32_f(SG_DIG_CTRL, 0x01388400);
2519 /* Link parallel detection - link is up */
2520 /* only if we have PCS_SYNC and not */
2521 /* receiving config code words */
2522 mac_status = tr32(MAC_STATUS);
2523 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2524 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2525 tg3_setup_flow_control(tp, 0, 0);
2526 current_link_up = 1;
2533 return current_link_up;
2536 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2538 int current_link_up = 0;
2540 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2541 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2545 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2549 if (fiber_autoneg(tp, &flags)) {
2550 u32 local_adv, remote_adv;
2552 local_adv = ADVERTISE_PAUSE_CAP;
2554 if (flags & MR_LP_ADV_SYM_PAUSE)
2555 remote_adv |= LPA_PAUSE_CAP;
2556 if (flags & MR_LP_ADV_ASYM_PAUSE)
2557 remote_adv |= LPA_PAUSE_ASYM;
2559 tg3_setup_flow_control(tp, local_adv, remote_adv);
2561 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2562 current_link_up = 1;
2564 for (i = 0; i < 30; i++) {
2567 (MAC_STATUS_SYNC_CHANGED |
2568 MAC_STATUS_CFG_CHANGED));
2570 if ((tr32(MAC_STATUS) &
2571 (MAC_STATUS_SYNC_CHANGED |
2572 MAC_STATUS_CFG_CHANGED)) == 0)
2576 mac_status = tr32(MAC_STATUS);
2577 if (current_link_up == 0 &&
2578 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2579 !(mac_status & MAC_STATUS_RCVD_CFG))
2580 current_link_up = 1;
2582 /* Forcing 1000FD link up. */
2583 current_link_up = 1;
2584 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2586 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2591 return current_link_up;
2594 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2597 u16 orig_active_speed;
2598 u8 orig_active_duplex;
2600 int current_link_up;
2604 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2605 TG3_FLAG_TX_PAUSE));
2606 orig_active_speed = tp->link_config.active_speed;
2607 orig_active_duplex = tp->link_config.active_duplex;
2609 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2610 netif_carrier_ok(tp->dev) &&
2611 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2612 mac_status = tr32(MAC_STATUS);
2613 mac_status &= (MAC_STATUS_PCS_SYNCED |
2614 MAC_STATUS_SIGNAL_DET |
2615 MAC_STATUS_CFG_CHANGED |
2616 MAC_STATUS_RCVD_CFG);
2617 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2618 MAC_STATUS_SIGNAL_DET)) {
2619 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2620 MAC_STATUS_CFG_CHANGED));
2625 tw32_f(MAC_TX_AUTO_NEG, 0);
2627 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2628 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2629 tw32_f(MAC_MODE, tp->mac_mode);
2632 if (tp->phy_id == PHY_ID_BCM8002)
2633 tg3_init_bcm8002(tp);
2635 /* Enable link change event even when serdes polling. */
2636 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2639 current_link_up = 0;
2640 mac_status = tr32(MAC_STATUS);
2642 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2643 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2645 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2647 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2648 tw32_f(MAC_MODE, tp->mac_mode);
2651 tp->hw_status->status =
2652 (SD_STATUS_UPDATED |
2653 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2655 for (i = 0; i < 100; i++) {
2656 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2657 MAC_STATUS_CFG_CHANGED));
2659 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2660 MAC_STATUS_CFG_CHANGED)) == 0)
2664 mac_status = tr32(MAC_STATUS);
2665 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2666 current_link_up = 0;
2667 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2668 tw32_f(MAC_MODE, (tp->mac_mode |
2669 MAC_MODE_SEND_CONFIGS));
2671 tw32_f(MAC_MODE, tp->mac_mode);
2675 if (current_link_up == 1) {
2676 tp->link_config.active_speed = SPEED_1000;
2677 tp->link_config.active_duplex = DUPLEX_FULL;
2678 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2679 LED_CTRL_LNKLED_OVERRIDE |
2680 LED_CTRL_1000MBPS_ON));
2682 tp->link_config.active_speed = SPEED_INVALID;
2683 tp->link_config.active_duplex = DUPLEX_INVALID;
2684 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2685 LED_CTRL_LNKLED_OVERRIDE |
2686 LED_CTRL_TRAFFIC_OVERRIDE));
2689 if (current_link_up != netif_carrier_ok(tp->dev)) {
2690 if (current_link_up)
2691 netif_carrier_on(tp->dev);
2693 netif_carrier_off(tp->dev);
2694 tg3_link_report(tp);
2697 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2699 if (orig_pause_cfg != now_pause_cfg ||
2700 orig_active_speed != tp->link_config.active_speed ||
2701 orig_active_duplex != tp->link_config.active_duplex)
2702 tg3_link_report(tp);
2708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2710 int current_link_up, err = 0;
2715 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2716 tw32_f(MAC_MODE, tp->mac_mode);
2722 (MAC_STATUS_SYNC_CHANGED |
2723 MAC_STATUS_CFG_CHANGED |
2724 MAC_STATUS_MI_COMPLETION |
2725 MAC_STATUS_LNKSTATE_CHANGED));
2731 current_link_up = 0;
2732 current_speed = SPEED_INVALID;
2733 current_duplex = DUPLEX_INVALID;
2735 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2736 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2738 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2739 bmsr |= BMSR_LSTATUS;
2741 bmsr &= ~BMSR_LSTATUS;
2744 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2746 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2747 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2748 /* do nothing, just check for link up at the end */
2749 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2752 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2753 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2754 ADVERTISE_1000XPAUSE |
2755 ADVERTISE_1000XPSE_ASYM |
2758 /* Always advertise symmetric PAUSE just like copper */
2759 new_adv |= ADVERTISE_1000XPAUSE;
2761 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2762 new_adv |= ADVERTISE_1000XHALF;
2763 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2764 new_adv |= ADVERTISE_1000XFULL;
2766 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2767 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2768 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2769 tg3_writephy(tp, MII_BMCR, bmcr);
2771 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2772 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2773 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2780 bmcr &= ~BMCR_SPEED1000;
2781 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2783 if (tp->link_config.duplex == DUPLEX_FULL)
2784 new_bmcr |= BMCR_FULLDPLX;
2786 if (new_bmcr != bmcr) {
2787 /* BMCR_SPEED1000 is a reserved bit that needs
2788 * to be set on write.
2790 new_bmcr |= BMCR_SPEED1000;
2792 /* Force a linkdown */
2793 if (netif_carrier_ok(tp->dev)) {
2796 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2797 adv &= ~(ADVERTISE_1000XFULL |
2798 ADVERTISE_1000XHALF |
2800 tg3_writephy(tp, MII_ADVERTISE, adv);
2801 tg3_writephy(tp, MII_BMCR, bmcr |
2805 netif_carrier_off(tp->dev);
2807 tg3_writephy(tp, MII_BMCR, new_bmcr);
2809 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2810 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2813 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2814 bmsr |= BMSR_LSTATUS;
2816 bmsr &= ~BMSR_LSTATUS;
2818 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2822 if (bmsr & BMSR_LSTATUS) {
2823 current_speed = SPEED_1000;
2824 current_link_up = 1;
2825 if (bmcr & BMCR_FULLDPLX)
2826 current_duplex = DUPLEX_FULL;
2828 current_duplex = DUPLEX_HALF;
2830 if (bmcr & BMCR_ANENABLE) {
2831 u32 local_adv, remote_adv, common;
2833 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2834 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2835 common = local_adv & remote_adv;
2836 if (common & (ADVERTISE_1000XHALF |
2837 ADVERTISE_1000XFULL)) {
2838 if (common & ADVERTISE_1000XFULL)
2839 current_duplex = DUPLEX_FULL;
2841 current_duplex = DUPLEX_HALF;
2843 tg3_setup_flow_control(tp, local_adv,
2847 current_link_up = 0;
2851 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2852 if (tp->link_config.active_duplex == DUPLEX_HALF)
2853 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2855 tw32_f(MAC_MODE, tp->mac_mode);
2858 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2860 tp->link_config.active_speed = current_speed;
2861 tp->link_config.active_duplex = current_duplex;
2863 if (current_link_up != netif_carrier_ok(tp->dev)) {
2864 if (current_link_up)
2865 netif_carrier_on(tp->dev);
2867 netif_carrier_off(tp->dev);
2868 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870 tg3_link_report(tp);
2875 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2877 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2878 /* Give autoneg time to complete. */
2879 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2882 if (!netif_carrier_ok(tp->dev) &&
2883 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2886 tg3_readphy(tp, MII_BMCR, &bmcr);
2887 if (bmcr & BMCR_ANENABLE) {
2890 /* Select shadow register 0x1f */
2891 tg3_writephy(tp, 0x1c, 0x7c00);
2892 tg3_readphy(tp, 0x1c, &phy1);
2894 /* Select expansion interrupt status register */
2895 tg3_writephy(tp, 0x17, 0x0f01);
2896 tg3_readphy(tp, 0x15, &phy2);
2897 tg3_readphy(tp, 0x15, &phy2);
2899 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2900 /* We have signal detect and not receiving
2901 * config code words, link is up by parallel
2905 bmcr &= ~BMCR_ANENABLE;
2906 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2907 tg3_writephy(tp, MII_BMCR, bmcr);
2908 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2912 else if (netif_carrier_ok(tp->dev) &&
2913 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2914 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2917 /* Select expansion interrupt status register */
2918 tg3_writephy(tp, 0x17, 0x0f01);
2919 tg3_readphy(tp, 0x15, &phy2);
2923 /* Config code words received, turn on autoneg. */
2924 tg3_readphy(tp, MII_BMCR, &bmcr);
2925 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2927 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2933 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2937 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2938 err = tg3_setup_fiber_phy(tp, force_reset);
2939 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2940 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2942 err = tg3_setup_copper_phy(tp, force_reset);
2945 if (tp->link_config.active_speed == SPEED_1000 &&
2946 tp->link_config.active_duplex == DUPLEX_HALF)
2947 tw32(MAC_TX_LENGTHS,
2948 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2949 (6 << TX_LENGTHS_IPG_SHIFT) |
2950 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952 tw32(MAC_TX_LENGTHS,
2953 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2954 (6 << TX_LENGTHS_IPG_SHIFT) |
2955 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2958 if (netif_carrier_ok(tp->dev)) {
2959 tw32(HOSTCC_STAT_COAL_TICKS,
2960 tp->coal.stats_block_coalesce_usecs);
2962 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2969 /* This is called whenever we suspect that the system chipset is re-
2970 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2971 * is bogus tx completions. We try to recover by setting the
2972 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2975 static void tg3_tx_recover(struct tg3 *tp)
2977 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2978 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2981 "mapped I/O cycles to the network device, attempting to "
2982 "recover. Please report the problem to the driver maintainer "
2983 "and include system chipset information.\n", tp->dev->name);
2985 spin_lock(&tp->lock);
2986 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2987 spin_unlock(&tp->lock);
2990 /* Tigon3 never reports partial packet sends. So we do not
2991 * need special logic to handle SKBs that have not had all
2992 * of their frags sent yet, like SunGEM does.
2994 static void tg3_tx(struct tg3 *tp)
2996 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2997 u32 sw_idx = tp->tx_cons;
2999 while (sw_idx != hw_idx) {
3000 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3001 struct sk_buff *skb = ri->skb;
3004 if (unlikely(skb == NULL)) {
3009 pci_unmap_single(tp->pdev,
3010 pci_unmap_addr(ri, mapping),
3016 sw_idx = NEXT_TX(sw_idx);
3018 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3019 ri = &tp->tx_buffers[sw_idx];
3020 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3023 pci_unmap_page(tp->pdev,
3024 pci_unmap_addr(ri, mapping),
3025 skb_shinfo(skb)->frags[i].size,
3028 sw_idx = NEXT_TX(sw_idx);
3033 if (unlikely(tx_bug)) {
3039 tp->tx_cons = sw_idx;
3041 if (unlikely(netif_queue_stopped(tp->dev))) {
3042 spin_lock(&tp->tx_lock);
3043 if (netif_queue_stopped(tp->dev) &&
3044 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3045 netif_wake_queue(tp->dev);
3046 spin_unlock(&tp->tx_lock);
3050 /* Returns size of skb allocated or < 0 on error.
3052 * We only need to fill in the address because the other members
3053 * of the RX descriptor are invariant, see tg3_init_rings.
3055 * Note the purposeful assymetry of cpu vs. chip accesses. For
3056 * posting buffers we only dirty the first cache line of the RX
3057 * descriptor (containing the address). Whereas for the RX status
3058 * buffers the cpu only reads the last cacheline of the RX descriptor
3059 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3061 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3062 int src_idx, u32 dest_idx_unmasked)
3064 struct tg3_rx_buffer_desc *desc;
3065 struct ring_info *map, *src_map;
3066 struct sk_buff *skb;
3068 int skb_size, dest_idx;
3071 switch (opaque_key) {
3072 case RXD_OPAQUE_RING_STD:
3073 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3074 desc = &tp->rx_std[dest_idx];
3075 map = &tp->rx_std_buffers[dest_idx];
3077 src_map = &tp->rx_std_buffers[src_idx];
3078 skb_size = tp->rx_pkt_buf_sz;
3081 case RXD_OPAQUE_RING_JUMBO:
3082 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083 desc = &tp->rx_jumbo[dest_idx];
3084 map = &tp->rx_jumbo_buffers[dest_idx];
3086 src_map = &tp->rx_jumbo_buffers[src_idx];
3087 skb_size = RX_JUMBO_PKT_BUF_SZ;
3094 /* Do not overwrite any of the map or rp information
3095 * until we are sure we can commit to a new buffer.
3097 * Callers depend upon this behavior and assume that
3098 * we leave everything unchanged if we fail.
3100 skb = dev_alloc_skb(skb_size);
3105 skb_reserve(skb, tp->rx_offset);
3107 mapping = pci_map_single(tp->pdev, skb->data,
3108 skb_size - tp->rx_offset,
3109 PCI_DMA_FROMDEVICE);
3112 pci_unmap_addr_set(map, mapping, mapping);
3114 if (src_map != NULL)
3115 src_map->skb = NULL;
3117 desc->addr_hi = ((u64)mapping >> 32);
3118 desc->addr_lo = ((u64)mapping & 0xffffffff);
3123 /* We only need to move over in the address because the other
3124 * members of the RX descriptor are invariant. See notes above
3125 * tg3_alloc_rx_skb for full details.
3127 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3128 int src_idx, u32 dest_idx_unmasked)
3130 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3131 struct ring_info *src_map, *dest_map;
3134 switch (opaque_key) {
3135 case RXD_OPAQUE_RING_STD:
3136 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3137 dest_desc = &tp->rx_std[dest_idx];
3138 dest_map = &tp->rx_std_buffers[dest_idx];
3139 src_desc = &tp->rx_std[src_idx];
3140 src_map = &tp->rx_std_buffers[src_idx];
3143 case RXD_OPAQUE_RING_JUMBO:
3144 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3145 dest_desc = &tp->rx_jumbo[dest_idx];
3146 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3147 src_desc = &tp->rx_jumbo[src_idx];
3148 src_map = &tp->rx_jumbo_buffers[src_idx];
3155 dest_map->skb = src_map->skb;
3156 pci_unmap_addr_set(dest_map, mapping,
3157 pci_unmap_addr(src_map, mapping));
3158 dest_desc->addr_hi = src_desc->addr_hi;
3159 dest_desc->addr_lo = src_desc->addr_lo;
3161 src_map->skb = NULL;
3164 #if TG3_VLAN_TAG_USED
3165 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3167 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3171 /* The RX ring scheme is composed of multiple rings which post fresh
3172 * buffers to the chip, and one special ring the chip uses to report
3173 * status back to the host.
3175 * The special ring reports the status of received packets to the
3176 * host. The chip does not write into the original descriptor the
3177 * RX buffer was obtained from. The chip simply takes the original
3178 * descriptor as provided by the host, updates the status and length
3179 * field, then writes this into the next status ring entry.
3181 * Each ring the host uses to post buffers to the chip is described
3182 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3183 * it is first placed into the on-chip ram. When the packet's length
3184 * is known, it walks down the TG3_BDINFO entries to select the ring.
3185 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3186 * which is within the range of the new packet's length is chosen.
3188 * The "separate ring for rx status" scheme may sound queer, but it makes
3189 * sense from a cache coherency perspective. If only the host writes
3190 * to the buffer post rings, and only the chip writes to the rx status
3191 * rings, then cache lines never move beyond shared-modified state.
3192 * If both the host and chip were to write into the same ring, cache line
3193 * eviction could occur since both entities want it in an exclusive state.
3195 static int tg3_rx(struct tg3 *tp, int budget)
3197 u32 work_mask, rx_std_posted = 0;
3198 u32 sw_idx = tp->rx_rcb_ptr;
3202 hw_idx = tp->hw_status->idx[0].rx_producer;
3204 * We need to order the read of hw_idx and the read of
3205 * the opaque cookie.
3210 while (sw_idx != hw_idx && budget > 0) {
3211 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3213 struct sk_buff *skb;
3214 dma_addr_t dma_addr;
3215 u32 opaque_key, desc_idx, *post_ptr;
3217 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3218 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3219 if (opaque_key == RXD_OPAQUE_RING_STD) {
3220 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3222 skb = tp->rx_std_buffers[desc_idx].skb;
3223 post_ptr = &tp->rx_std_ptr;
3225 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3226 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3228 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3229 post_ptr = &tp->rx_jumbo_ptr;
3232 goto next_pkt_nopost;
3235 work_mask |= opaque_key;
3237 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3238 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3240 tg3_recycle_rx(tp, opaque_key,
3241 desc_idx, *post_ptr);
3243 /* Other statistics kept track of by card. */
3244 tp->net_stats.rx_dropped++;
3248 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3250 if (len > RX_COPY_THRESHOLD
3251 && tp->rx_offset == 2
3252 /* rx_offset != 2 iff this is a 5701 card running
3253 * in PCI-X mode [see tg3_get_invariants()] */
3257 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3258 desc_idx, *post_ptr);
3262 pci_unmap_single(tp->pdev, dma_addr,
3263 skb_size - tp->rx_offset,
3264 PCI_DMA_FROMDEVICE);
3268 struct sk_buff *copy_skb;
3270 tg3_recycle_rx(tp, opaque_key,
3271 desc_idx, *post_ptr);
3273 copy_skb = dev_alloc_skb(len + 2);
3274 if (copy_skb == NULL)
3275 goto drop_it_no_recycle;
3277 copy_skb->dev = tp->dev;
3278 skb_reserve(copy_skb, 2);
3279 skb_put(copy_skb, len);
3280 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3281 memcpy(copy_skb->data, skb->data, len);
3282 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3284 /* We'll reuse the original ring buffer. */
3288 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3289 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3290 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3291 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3292 skb->ip_summed = CHECKSUM_UNNECESSARY;
3294 skb->ip_summed = CHECKSUM_NONE;
3296 skb->protocol = eth_type_trans(skb, tp->dev);
3297 #if TG3_VLAN_TAG_USED
3298 if (tp->vlgrp != NULL &&
3299 desc->type_flags & RXD_FLAG_VLAN) {
3300 tg3_vlan_rx(tp, skb,
3301 desc->err_vlan & RXD_VLAN_MASK);
3304 netif_receive_skb(skb);
3306 tp->dev->last_rx = jiffies;
3313 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3314 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3316 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3317 TG3_64BIT_REG_LOW, idx);
3318 work_mask &= ~RXD_OPAQUE_RING_STD;
3323 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3325 /* Refresh hw_idx to see if there is new work */
3326 if (sw_idx == hw_idx) {
3327 hw_idx = tp->hw_status->idx[0].rx_producer;
3332 /* ACK the status ring. */
3333 tp->rx_rcb_ptr = sw_idx;
3334 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3336 /* Refill RX ring(s). */
3337 if (work_mask & RXD_OPAQUE_RING_STD) {
3338 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3339 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3342 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3343 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3344 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3352 static int tg3_poll(struct net_device *netdev, int *budget)
3354 struct tg3 *tp = netdev_priv(netdev);
3355 struct tg3_hw_status *sblk = tp->hw_status;
3358 /* handle link change and other phy events */
3359 if (!(tp->tg3_flags &
3360 (TG3_FLAG_USE_LINKCHG_REG |
3361 TG3_FLAG_POLL_SERDES))) {
3362 if (sblk->status & SD_STATUS_LINK_CHG) {
3363 sblk->status = SD_STATUS_UPDATED |
3364 (sblk->status & ~SD_STATUS_LINK_CHG);
3365 spin_lock(&tp->lock);
3366 tg3_setup_phy(tp, 0);
3367 spin_unlock(&tp->lock);
3371 /* run TX completion thread */
3372 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3374 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3375 netif_rx_complete(netdev);
3376 schedule_work(&tp->reset_task);
3381 /* run RX thread, within the bounds set by NAPI.
3382 * All RX "locking" is done by ensuring outside
3383 * code synchronizes with dev->poll()
3385 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3386 int orig_budget = *budget;
3389 if (orig_budget > netdev->quota)
3390 orig_budget = netdev->quota;
3392 work_done = tg3_rx(tp, orig_budget);
3394 *budget -= work_done;
3395 netdev->quota -= work_done;
3398 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3399 tp->last_tag = sblk->status_tag;
3402 sblk->status &= ~SD_STATUS_UPDATED;
3404 /* if no more work, tell net stack and NIC we're done */
3405 done = !tg3_has_work(tp);
3407 netif_rx_complete(netdev);
3408 tg3_restart_ints(tp);
3411 return (done ? 0 : 1);
3414 static void tg3_irq_quiesce(struct tg3 *tp)
3416 BUG_ON(tp->irq_sync);
3421 synchronize_irq(tp->pdev->irq);
3424 static inline int tg3_irq_sync(struct tg3 *tp)
3426 return tp->irq_sync;
3429 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3430 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3431 * with as well. Most of the time, this is not necessary except when
3432 * shutting down the device.
3434 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3437 tg3_irq_quiesce(tp);
3438 spin_lock_bh(&tp->lock);
3441 static inline void tg3_full_unlock(struct tg3 *tp)
3443 spin_unlock_bh(&tp->lock);
3446 /* One-shot MSI handler - Chip automatically disables interrupt
3447 * after sending MSI so driver doesn't have to do it.
3449 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3451 struct net_device *dev = dev_id;
3452 struct tg3 *tp = netdev_priv(dev);
3454 prefetch(tp->hw_status);
3455 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3457 if (likely(!tg3_irq_sync(tp)))
3458 netif_rx_schedule(dev); /* schedule NAPI poll */
3463 /* MSI ISR - No need to check for interrupt sharing and no need to
3464 * flush status block and interrupt mailbox. PCI ordering rules
3465 * guarantee that MSI will arrive after the status block.
3467 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3469 struct net_device *dev = dev_id;
3470 struct tg3 *tp = netdev_priv(dev);
3472 prefetch(tp->hw_status);
3473 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3475 * Writing any value to intr-mbox-0 clears PCI INTA# and
3476 * chip-internal interrupt pending events.
3477 * Writing non-zero to intr-mbox-0 additional tells the
3478 * NIC to stop sending us irqs, engaging "in-intr-handler"
3481 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3482 if (likely(!tg3_irq_sync(tp)))
3483 netif_rx_schedule(dev); /* schedule NAPI poll */
3485 return IRQ_RETVAL(1);
3488 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3490 struct net_device *dev = dev_id;
3491 struct tg3 *tp = netdev_priv(dev);
3492 struct tg3_hw_status *sblk = tp->hw_status;
3493 unsigned int handled = 1;
3495 /* In INTx mode, it is possible for the interrupt to arrive at
3496 * the CPU before the status block posted prior to the interrupt.
3497 * Reading the PCI State register will confirm whether the
3498 * interrupt is ours and will flush the status block.
3500 if ((sblk->status & SD_STATUS_UPDATED) ||
3501 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3503 * Writing any value to intr-mbox-0 clears PCI INTA# and
3504 * chip-internal interrupt pending events.
3505 * Writing non-zero to intr-mbox-0 additional tells the
3506 * NIC to stop sending us irqs, engaging "in-intr-handler"
3509 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3511 if (tg3_irq_sync(tp))
3513 sblk->status &= ~SD_STATUS_UPDATED;
3514 if (likely(tg3_has_work(tp))) {
3515 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3516 netif_rx_schedule(dev); /* schedule NAPI poll */
3518 /* No work, shared interrupt perhaps? re-enable
3519 * interrupts, and flush that PCI write
3521 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3524 } else { /* shared interrupt */
3528 return IRQ_RETVAL(handled);
3531 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3533 struct net_device *dev = dev_id;
3534 struct tg3 *tp = netdev_priv(dev);
3535 struct tg3_hw_status *sblk = tp->hw_status;
3536 unsigned int handled = 1;
3538 /* In INTx mode, it is possible for the interrupt to arrive at
3539 * the CPU before the status block posted prior to the interrupt.
3540 * Reading the PCI State register will confirm whether the
3541 * interrupt is ours and will flush the status block.
3543 if ((sblk->status_tag != tp->last_tag) ||
3544 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3546 * writing any value to intr-mbox-0 clears PCI INTA# and
3547 * chip-internal interrupt pending events.
3548 * writing non-zero to intr-mbox-0 additional tells the
3549 * NIC to stop sending us irqs, engaging "in-intr-handler"
3552 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3554 if (tg3_irq_sync(tp))
3556 if (netif_rx_schedule_prep(dev)) {
3557 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3558 /* Update last_tag to mark that this status has been
3559 * seen. Because interrupt may be shared, we may be
3560 * racing with tg3_poll(), so only update last_tag
3561 * if tg3_poll() is not scheduled.
3563 tp->last_tag = sblk->status_tag;
3564 __netif_rx_schedule(dev);
3566 } else { /* shared interrupt */
3570 return IRQ_RETVAL(handled);
3573 /* ISR for interrupt test */
3574 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3575 struct pt_regs *regs)
3577 struct net_device *dev = dev_id;
3578 struct tg3 *tp = netdev_priv(dev);
3579 struct tg3_hw_status *sblk = tp->hw_status;
3581 if ((sblk->status & SD_STATUS_UPDATED) ||
3582 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3583 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3585 return IRQ_RETVAL(1);
3587 return IRQ_RETVAL(0);
3590 static int tg3_init_hw(struct tg3 *, int);
3591 static int tg3_halt(struct tg3 *, int, int);
3593 #ifdef CONFIG_NET_POLL_CONTROLLER
3594 static void tg3_poll_controller(struct net_device *dev)
3596 struct tg3 *tp = netdev_priv(dev);
3598 tg3_interrupt(tp->pdev->irq, dev, NULL);
3602 static void tg3_reset_task(void *_data)
3604 struct tg3 *tp = _data;
3605 unsigned int restart_timer;
3607 tg3_full_lock(tp, 0);
3608 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3610 if (!netif_running(tp->dev)) {
3611 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3612 tg3_full_unlock(tp);
3616 tg3_full_unlock(tp);
3620 tg3_full_lock(tp, 1);
3622 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3623 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3625 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3626 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3627 tp->write32_rx_mbox = tg3_write_flush_reg32;
3628 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3629 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3635 tg3_netif_start(tp);
3638 mod_timer(&tp->timer, jiffies + 1);
3640 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3642 tg3_full_unlock(tp);
3645 static void tg3_tx_timeout(struct net_device *dev)
3647 struct tg3 *tp = netdev_priv(dev);
3649 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3652 schedule_work(&tp->reset_task);
3655 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3656 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3658 u32 base = (u32) mapping & 0xffffffff;
3660 return ((base > 0xffffdcc0) &&
3661 (base + len + 8 < base));
3664 /* Test for DMA addresses > 40-bit */
3665 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3668 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3669 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3670 return (((u64) mapping + len) > DMA_40BIT_MASK);
3677 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3679 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3680 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3681 u32 last_plus_one, u32 *start,
3682 u32 base_flags, u32 mss)
3684 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3685 dma_addr_t new_addr = 0;
3692 /* New SKB is guaranteed to be linear. */
3694 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3696 /* Make sure new skb does not cross any 4G boundaries.
3697 * Drop the packet if it does.
3699 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3701 dev_kfree_skb(new_skb);
3704 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3705 base_flags, 1 | (mss << 1));
3706 *start = NEXT_TX(entry);
3710 /* Now clean up the sw ring entries. */
3712 while (entry != last_plus_one) {
3716 len = skb_headlen(skb);
3718 len = skb_shinfo(skb)->frags[i-1].size;
3719 pci_unmap_single(tp->pdev,
3720 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3721 len, PCI_DMA_TODEVICE);
3723 tp->tx_buffers[entry].skb = new_skb;
3724 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3726 tp->tx_buffers[entry].skb = NULL;
3728 entry = NEXT_TX(entry);
3737 static void tg3_set_txd(struct tg3 *tp, int entry,
3738 dma_addr_t mapping, int len, u32 flags,
3741 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3742 int is_end = (mss_and_is_end & 0x1);
3743 u32 mss = (mss_and_is_end >> 1);
3747 flags |= TXD_FLAG_END;
3748 if (flags & TXD_FLAG_VLAN) {
3749 vlan_tag = flags >> 16;
3752 vlan_tag |= (mss << TXD_MSS_SHIFT);
3754 txd->addr_hi = ((u64) mapping >> 32);
3755 txd->addr_lo = ((u64) mapping & 0xffffffff);
3756 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3757 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3760 /* hard_start_xmit for devices that don't have any bugs and
3761 * support TG3_FLG2_HW_TSO_2 only.
3763 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3765 struct tg3 *tp = netdev_priv(dev);
3767 u32 len, entry, base_flags, mss;
3769 len = skb_headlen(skb);
3771 /* We are running in BH disabled context with netif_tx_lock
3772 * and TX reclaim runs via tp->poll inside of a software
3773 * interrupt. Furthermore, IRQ processing runs lockless so we have
3774 * no IRQ context deadlocks to worry about either. Rejoice!
3776 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3777 if (!netif_queue_stopped(dev)) {
3778 netif_stop_queue(dev);
3780 /* This is a hard error, log it. */
3781 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3782 "queue awake!\n", dev->name);
3784 return NETDEV_TX_BUSY;
3787 entry = tp->tx_prod;
3789 #if TG3_TSO_SUPPORT != 0
3791 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3792 (mss = skb_shinfo(skb)->gso_size) != 0) {
3793 int tcp_opt_len, ip_tcp_len;
3795 if (skb_header_cloned(skb) &&
3796 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3801 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3802 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3804 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3805 TXD_FLAG_CPU_POST_DMA);
3807 skb->nh.iph->check = 0;
3808 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3810 skb->h.th->check = 0;
3812 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3814 else if (skb->ip_summed == CHECKSUM_HW)
3815 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3818 if (skb->ip_summed == CHECKSUM_HW)
3819 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3821 #if TG3_VLAN_TAG_USED
3822 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3823 base_flags |= (TXD_FLAG_VLAN |
3824 (vlan_tx_tag_get(skb) << 16));
3827 /* Queue skb data, a.k.a. the main skb fragment. */
3828 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3830 tp->tx_buffers[entry].skb = skb;
3831 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3833 tg3_set_txd(tp, entry, mapping, len, base_flags,
3834 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3836 entry = NEXT_TX(entry);
3838 /* Now loop through additional data fragments, and queue them. */
3839 if (skb_shinfo(skb)->nr_frags > 0) {
3840 unsigned int i, last;
3842 last = skb_shinfo(skb)->nr_frags - 1;
3843 for (i = 0; i <= last; i++) {
3844 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3847 mapping = pci_map_page(tp->pdev,
3850 len, PCI_DMA_TODEVICE);
3852 tp->tx_buffers[entry].skb = NULL;
3853 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3855 tg3_set_txd(tp, entry, mapping, len,
3856 base_flags, (i == last) | (mss << 1));
3858 entry = NEXT_TX(entry);
3862 /* Packets are ready, update Tx producer idx local and on card. */
3863 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3865 tp->tx_prod = entry;
3866 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3867 spin_lock(&tp->tx_lock);
3868 netif_stop_queue(dev);
3869 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3870 netif_wake_queue(tp->dev);
3871 spin_unlock(&tp->tx_lock);
3877 dev->trans_start = jiffies;
3879 return NETDEV_TX_OK;
3882 #if TG3_TSO_SUPPORT != 0
3883 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3885 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3886 * TSO header is greater than 80 bytes.
/* Software-segment an oversized-TSO-header skb with GSO and transmit the
 * resulting segments one by one through tg3_start_xmit_dma_bug().
 * Returns NETDEV_TX_BUSY (queue stopped) when the ring cannot hold the
 * worst-case fragment count, NETDEV_TX_OK otherwise.
 * NOTE(review): this extract is line-sampled; the loop walking the `segs`
 * list and the `tg3_tso_bug_end` label are among the elided lines.
 */
3888 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3890 struct sk_buff *segs, *nskb;
3892 /* Estimate the number of fragments in the worst case */
3893 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3894 netif_stop_queue(tp->dev);
3895 return NETDEV_TX_BUSY;
/* Segment in software, masking off NETIF_F_TSO so GSO really splits it. */
3898 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3899 if (unlikely(IS_ERR(segs)))
3900 goto tg3_tso_bug_end;
/* Each segment re-enters the normal (bug-aware) transmit path. */
3906 tg3_start_xmit_dma_bug(nskb, tp->dev);
3912 return NETDEV_TX_OK;
3916 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3917 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
/* hard_start_xmit path for chips with DMA-address hardware bugs: every
 * mapped buffer is checked with tg3_4g_overflow_test() (4GB-boundary
 * crossing) and tg3_40bit_overflow_test(); if any buffer would trip a bug,
 * tigon3_dma_hwbug_workaround() re-queues the packet from bounce buffers.
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when the ring is full.
 */
3919 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3921 struct tg3 *tp = netdev_priv(dev);
3923 u32 len, entry, base_flags, mss;
3924 int would_hit_hwbug;
3926 len = skb_headlen(skb);
3928 /* We are running in BH disabled context with netif_tx_lock
3929 * and TX reclaim runs via tp->poll inside of a software
3930 * interrupt. Furthermore, IRQ processing runs lockless so we have
3931 * no IRQ context deadlocks to worry about either. Rejoice!
/* Ring full while the queue is awake is a driver bug: stop and log it. */
3933 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3934 if (!netif_queue_stopped(dev)) {
3935 netif_stop_queue(dev);
3937 /* This is a hard error, log it. */
3938 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3939 "queue awake!\n", dev->name);
3941 return NETDEV_TX_BUSY;
3944 entry = tp->tx_prod;
3946 if (skb->ip_summed == CHECKSUM_HW)
3947 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3948 #if TG3_TSO_SUPPORT != 0
/* TSO setup: a non-zero gso_size on an over-MTU frame selects TSO. */
3950 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3951 (mss = skb_shinfo(skb)->gso_size) != 0) {
3952 int tcp_opt_len, ip_tcp_len, hdr_len;
/* Headers must be writable; unclone before editing IP/TCP fields. */
3954 if (skb_header_cloned(skb) &&
3955 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3960 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3961 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
/* HW_TSO_1 bug: headers over 80 bytes must go through the GSO fallback. */
3963 hdr_len = ip_tcp_len + tcp_opt_len;
3964 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3965 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3966 return (tg3_tso_bug(tp, skb));
3968 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3969 TXD_FLAG_CPU_POST_DMA);
/* Pre-cook the IP header; hardware recomputes per-segment checksums. */
3971 skb->nh.iph->check = 0;
3972 skb->nh.iph->tot_len = htons(mss + hdr_len);
3973 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3974 skb->h.th->check = 0;
3975 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3979 ~csum_tcpudp_magic(skb->nh.iph->saddr,
/* Encode IP-option/TCP-option lengths where this chip family expects
 * them (mss field vs. base_flags), per ASIC revision. */
3984 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3985 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3986 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3989 tsflags = ((skb->nh.iph->ihl - 5) +
3990 (tcp_opt_len >> 2));
3991 mss |= (tsflags << 11);
3994 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3997 tsflags = ((skb->nh.iph->ihl - 5) +
3998 (tcp_opt_len >> 2));
3999 base_flags |= tsflags << 12;
4006 #if TG3_VLAN_TAG_USED
4007 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4008 base_flags |= (TXD_FLAG_VLAN |
4009 (vlan_tx_tag_get(skb) << 16));
4012 /* Queue skb data, a.k.a. the main skb fragment. */
4013 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4015 tp->tx_buffers[entry].skb = skb;
4016 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4018 would_hit_hwbug = 0;
/* 4GB-boundary check on the linear part. */
4020 if (tg3_4g_overflow_test(mapping, len))
4021 would_hit_hwbug = 1;
4023 tg3_set_txd(tp, entry, mapping, len, base_flags,
4024 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4026 entry = NEXT_TX(entry);
4028 /* Now loop through additional data fragments, and queue them. */
4029 if (skb_shinfo(skb)->nr_frags > 0) {
4030 unsigned int i, last;
4032 last = skb_shinfo(skb)->nr_frags - 1;
4033 for (i = 0; i <= last; i++) {
4034 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4037 mapping = pci_map_page(tp->pdev,
4040 len, PCI_DMA_TODEVICE);
/* Only the descriptor holding the head skb keeps the skb pointer;
 * fragment slots store NULL so reclaim frees the skb exactly once. */
4042 tp->tx_buffers[entry].skb = NULL;
4043 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4045 if (tg3_4g_overflow_test(mapping, len))
4046 would_hit_hwbug = 1;
4048 if (tg3_40bit_overflow_test(tp, mapping, len))
4049 would_hit_hwbug = 1;
4051 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4052 tg3_set_txd(tp, entry, mapping, len,
4053 base_flags, (i == last)|(mss << 1));
4055 tg3_set_txd(tp, entry, mapping, len,
4056 base_flags, (i == last));
4058 entry = NEXT_TX(entry);
/* Re-queue the whole packet via bounce buffers if any mapping would
 * trigger the DMA hardware bug. */
4062 if (would_hit_hwbug) {
4063 u32 last_plus_one = entry;
4066 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4067 start &= (TG3_TX_RING_SIZE - 1);
4069 /* If the workaround fails due to memory/mapping
4070 * failure, silently drop this packet.
4072 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4073 &start, base_flags, mss))
4079 /* Packets are ready, update Tx producer idx local and on card. */
4080 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4082 tp->tx_prod = entry;
/* Stop the queue near-full; re-wake under tx_lock if reclaim already
 * freed enough descriptors (avoids a stop/wake race with tp->poll). */
4083 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4084 spin_lock(&tp->tx_lock);
4085 netif_stop_queue(dev);
4086 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4087 netif_wake_queue(tp->dev);
4088 spin_unlock(&tp->tx_lock);
4094 dev->trans_start = jiffies;
4096 return NETDEV_TX_OK;
/* Record MTU-derived state: enable the jumbo RX ring for over-1500 MTUs
 * and, on 5780-class chips, toggle TSO capability (these chips cannot do
 * TSO with jumbo frames, per the flag handling below).
 * NOTE(review): the continuation line carrying the `new_mtu` parameter and
 * the dev->mtu assignment are elided in this extract.
 */
4099 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4104 if (new_mtu > ETH_DATA_LEN) {
4105 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4106 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4107 ethtool_op_set_tso(dev, 0);
4110 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
/* Standard MTU: restore TSO capability on 5780-class, no jumbo ring. */
4112 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4113 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4114 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* net_device MTU-change handler. Validates the requested MTU, then either
 * records it for later (interface down) or halts the chip, applies the new
 * MTU, and restarts the hardware under the full lock.
 */
4118 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4120 struct tg3 *tp = netdev_priv(dev);
/* Range check against chip-dependent maximum. */
4122 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4125 if (!netif_running(dev)) {
4126 /* We'll just catch it later when the
4129 tg3_set_mtu(dev, tp, new_mtu);
/* Interface is up: full reset cycle is required to resize rings. */
4135 tg3_full_lock(tp, 1);
4137 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4139 tg3_set_mtu(dev, tp, new_mtu);
4143 tg3_netif_start(tp);
4145 tg3_full_unlock(tp);
4150 /* Free up pending packets in all rx/tx rings.
4152 * The chip has been shut down and the driver detached from
4153 * the networking, so no interrupts or new tx packets will
4154 * end up in the driver. tp->{tx,}lock is not held and we are not
4155 * in an interrupt context and thus may sleep.
4157 static void tg3_free_rings(struct tg3 *tp)
4159 struct ring_info *rxp;
/* Unmap and free every posted standard-ring RX skb. */
4162 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4163 rxp = &tp->rx_std_buffers[i];
4165 if (rxp->skb == NULL)
4167 pci_unmap_single(tp->pdev,
4168 pci_unmap_addr(rxp, mapping),
4169 tp->rx_pkt_buf_sz - tp->rx_offset,
4170 PCI_DMA_FROMDEVICE);
4171 dev_kfree_skb_any(rxp->skb);
/* Same for the jumbo ring (fixed RX_JUMBO_PKT_BUF_SZ buffers). */
4175 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4176 rxp = &tp->rx_jumbo_buffers[i];
4178 if (rxp->skb == NULL)
4180 pci_unmap_single(tp->pdev,
4181 pci_unmap_addr(rxp, mapping),
4182 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4183 PCI_DMA_FROMDEVICE);
4184 dev_kfree_skb_any(rxp->skb);
/* TX ring: the head descriptor owns the skb; following descriptors
 * (one per page fragment) are unmapped with pci_unmap_page(), then the
 * skb is freed once. Loop index advances inside the body. */
4188 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4189 struct tx_ring_info *txp;
4190 struct sk_buff *skb;
4193 txp = &tp->tx_buffers[i];
4201 pci_unmap_single(tp->pdev,
4202 pci_unmap_addr(txp, mapping),
4209 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4210 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4211 pci_unmap_page(tp->pdev,
4212 pci_unmap_addr(txp, mapping),
4213 skb_shinfo(skb)->frags[j].size,
4218 dev_kfree_skb_any(skb);
4222 /* Initialize tx/rx rings for packet processing.
4224 * The chip has been shut down and the driver detached from
4225 * the networking, so no interrupts or new tx packets will
4226 * end up in the driver. tp->{tx,}lock are held and thus
4229 static void tg3_init_rings(struct tg3 *tp)
4233 /* Free up all the SKBs. */
4236 /* Zero out all descriptors. */
4237 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4238 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4239 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4240 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips use jumbo-sized buffers in the standard ring when
 * running with a jumbo MTU (they have no separate jumbo ring). */
4242 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4243 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4244 (tp->dev->mtu > ETH_DATA_LEN))
4245 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4247 /* Initialize invariants of the rings, we only set this
4248 * stuff once. This works because the card does not
4249 * write into the rx buffer posting rings.
4251 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4252 struct tg3_rx_buffer_desc *rxd;
4254 rxd = &tp->rx_std[i];
4255 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4257 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
/* opaque echoes ring id + index back in completion descriptors. */
4258 rxd->opaque = (RXD_OPAQUE_RING_STD |
4259 (i << RXD_OPAQUE_INDEX_SHIFT));
4262 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4263 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4264 struct tg3_rx_buffer_desc *rxd;
4266 rxd = &tp->rx_jumbo[i];
4267 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4269 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4271 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4272 (i << RXD_OPAQUE_INDEX_SHIFT));
4276 /* Now allocate fresh SKBs for each rx ring. */
4277 for (i = 0; i < tp->rx_pending; i++) {
4278 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4283 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4284 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4285 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4293 * Must not be invoked with interrupt sources disabled and
4294 * the hardware shutdown down.
/* Release all DMA-consistent memory and the bookkeeping arrays.
 * rx_jumbo_buffers and tx_buffers live inside the single rx_std_buffers
 * kmalloc (see tg3_alloc_consistent), so one kfree covers all three.
 * Pointers are NULLed so the function is safe to call on partial
 * allocations (the error path of tg3_alloc_consistent relies on this).
 */
4296 static void tg3_free_consistent(struct tg3 *tp)
4298 kfree(tp->rx_std_buffers);
4299 tp->rx_std_buffers = NULL;
4301 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4302 tp->rx_std, tp->rx_std_mapping);
4306 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4307 tp->rx_jumbo, tp->rx_jumbo_mapping);
4308 tp->rx_jumbo = NULL;
4311 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4312 tp->rx_rcb, tp->rx_rcb_mapping);
4316 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4317 tp->tx_ring, tp->tx_desc_mapping);
4320 if (tp->hw_status) {
4321 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4322 tp->hw_status, tp->status_mapping);
4323 tp->hw_status = NULL;
4326 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4327 tp->hw_stats, tp->stats_mapping);
4328 tp->hw_stats = NULL;
4333 * Must not be invoked with interrupt sources disabled and
4334 * the hardware shutdown down. Can sleep.
/* Allocate all ring bookkeeping (one kmalloc carved into std/jumbo RX and
 * TX info arrays) plus the DMA-consistent descriptor rings, status block,
 * and statistics block. On any failure, tg3_free_consistent() tears down
 * whatever was already allocated.
 */
4336 static int tg3_alloc_consistent(struct tg3 *tp)
4338 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4340 TG3_RX_JUMBO_RING_SIZE)) +
4341 (sizeof(struct tx_ring_info) *
4344 if (!tp->rx_std_buffers)
4347 memset(tp->rx_std_buffers, 0,
4348 (sizeof(struct ring_info) *
4350 TG3_RX_JUMBO_RING_SIZE)) +
4351 (sizeof(struct tx_ring_info) *
/* Carve the jumbo and TX arrays out of the single allocation. */
4354 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4355 tp->tx_buffers = (struct tx_ring_info *)
4356 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4358 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4359 &tp->rx_std_mapping);
4363 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4364 &tp->rx_jumbo_mapping);
4369 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4370 &tp->rx_rcb_mapping);
4374 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4375 &tp->tx_desc_mapping);
4379 tp->hw_status = pci_alloc_consistent(tp->pdev,
4381 &tp->status_mapping);
4385 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4386 sizeof(struct tg3_hw_stats),
4387 &tp->stats_mapping);
/* Hardware-written blocks start zeroed so stale state is never read. */
4391 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4392 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Error path: undo partial allocations. */
4397 tg3_free_consistent(tp);
4401 #define MAX_WAIT_CNT 1000
4403 /* To stop a block, clear the enable bit and poll till it
4404 * clears. tp->lock is held.
/* Clear `enable_bit` in the block-mode register at `ofs` and poll up to
 * MAX_WAIT_CNT times for the hardware to acknowledge by clearing the bit.
 * On 5705-plus chips some blocks cannot be disabled and are reported as
 * success immediately. `silent` suppresses the timeout message.
 */
4406 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4411 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4418 /* We can't enable/disable these bits of the
4419 * 5705/5750, just say success.
4432 for (i = 0; i < MAX_WAIT_CNT; i++) {
4435 if ((val & enable_bit) == 0)
4439 if (i == MAX_WAIT_CNT && !silent) {
4440 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4441 "ofs=%lx enable_bit=%x\n",
4449 /* tp->lock is held. */
/* Orderly shutdown of the MAC and every DMA/data-path block, in
 * receive-path then transmit-path order, ORing the per-block stop results
 * into `err`. Finishes by resetting the FTQs, stopping the buffer manager
 * and memory arbiter, and zeroing the status/stats blocks.
 * tp->lock must be held (see comment above); `silent` is forwarded to
 * tg3_stop_block().
 */
4450 static int tg3_abort_hw(struct tg3 *tp, int silent)
4454 tg3_disable_ints(tp);
4456 tp->rx_mode &= ~RX_MODE_ENABLE;
4457 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop receive-side blocks first so no new work is queued. */
4460 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4461 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4462 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4463 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4464 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4465 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4467 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4468 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4469 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4470 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4471 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4472 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4473 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4475 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4476 tw32_f(MAC_MODE, tp->mac_mode);
/* Disable the MAC transmitter and poll for the bit to clear. */
4479 tp->tx_mode &= ~TX_MODE_ENABLE;
4480 tw32_f(MAC_TX_MODE, tp->tx_mode);
4482 for (i = 0; i < MAX_WAIT_CNT; i++) {
4484 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4487 if (i >= MAX_WAIT_CNT) {
4488 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4489 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4490 tp->dev->name, tr32(MAC_TX_MODE));
4494 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4495 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4496 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through-queue reset. */
4498 tw32(FTQ_RESET, 0xffffffff);
4499 tw32(FTQ_RESET, 0x00000000);
4501 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4502 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4505 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4507 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4512 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration grant (SWARB_GNT1), polling up
 * to 8000 iterations, but only on the outermost call — nvram_lock_cnt
 * makes the lock recursive. No-op on parts without TG3_FLAG_NVRAM.
 * tp->lock must be held (see comment above).
 */
4513 static int tg3_nvram_lock(struct tg3 *tp)
4515 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4518 if (tp->nvram_lock_cnt == 0) {
4519 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4520 for (i = 0; i < 8000; i++) {
4521 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: withdraw the request before failing. */
4526 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4530 tp->nvram_lock_cnt++;
4535 /* tp->lock is held. */
/* Release one level of the recursive NVRAM arbitration lock; the hardware
 * request is cleared only when the count drops to zero.
 * tp->lock must be held (see comment above).
 */
4536 static void tg3_nvram_unlock(struct tg3 *tp)
4538 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4539 if (tp->nvram_lock_cnt > 0)
4540 tp->nvram_lock_cnt--;
4541 if (tp->nvram_lock_cnt == 0)
4542 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4546 /* tp->lock is held. */
/* Set ACCESS_ENABLE in NVRAM_ACCESS on 5750-plus parts whose NVRAM is not
 * protected; older/protected parts need no explicit enable.
 * tp->lock must be held (see comment above).
 */
4547 static void tg3_enable_nvram_access(struct tg3 *tp)
4549 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4550 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4551 u32 nvaccess = tr32(NVRAM_ACCESS);
4553 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4557 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear ACCESS_ENABLE under the
 * same chip conditions. tp->lock must be held (see comment above).
 */
4558 static void tg3_disable_nvram_access(struct tg3 *tp)
4560 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4561 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4562 u32 nvaccess = tr32(NVRAM_ACCESS);
4564 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4568 /* tp->lock is held. */
/* Signal the management firmware before a chip reset: write the firmware
 * mailbox magic, then (with the new ASF handshake) publish the driver
 * state corresponding to `kind` (init/shutdown/suspend).
 * tp->lock must be held (see comment above).
 */
4569 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4571 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4572 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4574 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4576 case RESET_KIND_INIT:
4577 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4581 case RESET_KIND_SHUTDOWN:
4582 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4586 case RESET_KIND_SUSPEND:
4587 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4597 /* tp->lock is held. */
/* Post-reset counterpart of tg3_write_sig_pre_reset(): with the new ASF
 * handshake, report the completed state (START_DONE / UNLOAD_DONE) for
 * `kind`. tp->lock must be held (see comment above).
 */
4598 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4600 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4602 case RESET_KIND_INIT:
4603 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4604 DRV_STATE_START_DONE);
4607 case RESET_KIND_SHUTDOWN:
4608 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4609 DRV_STATE_UNLOAD_DONE);
4618 /* tp->lock is held. */
/* Legacy-ASF driver-state signalling: when ASF is enabled, write the
 * driver state for `kind` to the firmware state mailbox.
 * tp->lock must be held (see comment above).
 */
4619 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4621 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4623 case RESET_KIND_INIT:
4624 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4628 case RESET_KIND_SHUTDOWN:
4629 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4633 case RESET_KIND_SUSPEND:
4634 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4644 static void tg3_stop_fw(struct tg3 *);
4646 /* tp->lock is held. */
/* Full core-clock chip reset with all the per-revision workarounds:
 * temporarily bypasses the 5701 posted-write flush, issues
 * GRC_MISC_CFG_CORECLK_RESET, re-establishes PCI/PCI-E/MSI config state,
 * waits for bootcode firmware to signal completion, and re-probes the ASF
 * configuration from NVRAM shadow memory.
 * tp->lock must be held (see comment above).
 */
4647 static int tg3_chip_reset(struct tg3 *tp)
4650 void (*write_op)(struct tg3 *, u32, u32);
4655 /* No matching tg3_nvram_unlock() after this because
4656 * chip reset below will undo the nvram lock.
4658 tp->nvram_lock_cnt = 0;
4660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4663 tw32(GRC_FASTBOOT_PC, 0);
4666 * We must avoid the readl() that normally takes place.
4667 * It locks machines, causes machine checks, and other
4668 * fun things. So, temporarily disable the 5701
4669 * hardware workaround, while we do the reset.
4671 write_op = tp->write32;
4672 if (write_op == tg3_write_flush_reg32)
4673 tp->write32 = tg3_write32;
4676 val = GRC_MISC_CFG_CORECLK_RESET;
/* PCI-E parts need an extra GRC bit set before the reset on non-A0
 * 5750 silicon (register 0x7e2c probe). */
4678 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4679 if (tr32(0x7e2c) == 0x60) {
4682 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4683 tw32(GRC_MISC_CFG, (1 << 29));
4688 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4689 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4690 tw32(GRC_MISC_CFG, val);
4692 /* restore 5701 hardware bug workaround write method */
4693 tp->write32 = write_op;
4695 /* Unfortunately, we have to delay before the PCI read back.
4696 * Some 575X chips even will not respond to a PCI cfg access
4697 * when the reset command is given to the chip.
4699 * How do these hardware designers expect things to work
4700 * properly if the PCI write is posted for a long period
4701 * of time? It is always necessary to have some method by
4702 * which a register read back can occur to push the write
4703 * out which does the reset.
4705 * For most tg3 variants the trick below was working.
4710 /* Flush PCI posted writes. The normal MMIO registers
4711 * are inaccessible at this time so this is the only
4712 * way to make this reliably (actually, this is no longer
4713 * the case, see above). I tried to use indirect
4714 * register read/write but this upset some 5701 variants.
4716 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* 5750 A0 PCI-E: wait for link training, then kick retraining via
 * config register 0xc4 and set max payload size / clear errors. */
4720 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4721 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4725 /* Wait for link training to complete. */
4726 for (i = 0; i < 5000; i++)
4729 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4730 pci_write_config_dword(tp->pdev, 0xc4,
4731 cfg_val | (1 << 15));
4733 /* Set PCIE max payload size and clear error status. */
4734 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4737 /* Re-enable indirect register accesses. */
4738 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4739 tp->misc_host_ctrl);
4741 /* Set MAX PCI retry to zero. */
4742 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4743 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4744 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4745 val |= PCISTATE_RETRY_SAME_DMA;
4746 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4748 pci_restore_state(tp->pdev);
4750 /* Make sure PCI-X relaxed ordering bit is clear. */
4751 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4752 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4753 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4755 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4758 /* Chip reset on 5780 will reset MSI enable bit,
4759 * so need to restore it.
4761 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4764 pci_read_config_word(tp->pdev,
4765 tp->msi_cap + PCI_MSI_FLAGS,
4767 pci_write_config_word(tp->pdev,
4768 tp->msi_cap + PCI_MSI_FLAGS,
4769 ctrl | PCI_MSI_FLAGS_ENABLE);
4770 val = tr32(MSGINT_MODE);
4771 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4774 val = tr32(MEMARB_MODE);
4775 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4778 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4780 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4782 tw32(0x5000, 0x400);
4785 tw32(GRC_MODE, tp->grc_mode);
/* 5705 A0: undocumented bit 15 in register 0xc4 after reset. */
4787 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4788 u32 val = tr32(0xc4);
4790 tw32(0xc4, val | (1 << 15));
4793 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4795 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4796 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4797 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4798 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Restore MAC port mode per PHY type (TBI serdes / MII serdes / copper). */
4801 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4802 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4803 tw32_f(MAC_MODE, tp->mac_mode);
4804 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4805 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4806 tw32_f(MAC_MODE, tp->mac_mode);
4808 tw32_f(MAC_MODE, 0);
4811 /* Wait for firmware initialization to complete. */
4812 for (i = 0; i < 100000; i++) {
4813 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4814 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4819 /* Chip might not be fitted with firmare. Some Sun onboard
4820 * parts are configured like that. So don't signal the timeout
4821 * of the above loop as an error, but do report the lack of
4822 * running firmware once.
4825 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4826 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4828 printk(KERN_INFO PFX "%s: No firmware running.\n",
4832 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4833 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4834 u32 val = tr32(0x7c00);
4836 tw32(0x7c00, val | (1 << 25));
4839 /* Reprobe ASF enable state. */
4840 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4841 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4842 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4843 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4846 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4847 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4848 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4849 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4850 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4857 /* tp->lock is held. */
/* Ask the ASF management firmware to pause: post FWCMD_NICDRV_PAUSE_FW in
 * the command mailbox, raise the RX-CPU event, and poll up to 100 times
 * for the CPU to acknowledge (event bit 14 clearing). No-op without ASF.
 * tp->lock must be held (see comment above).
 */
4858 static void tg3_stop_fw(struct tg3 *tp)
4860 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4864 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4865 val = tr32(GRC_RX_CPU_EVENT);
4867 tw32(GRC_RX_CPU_EVENT, val);
4869 /* Wait for RX cpu to ACK the event. */
4870 for (i = 0; i < 100; i++) {
4871 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4878 /* tp->lock is held. */
/* High-level chip halt: pre-reset firmware signalling, hardware abort,
 * chip reset, then legacy and post-reset signalling for `kind`.
 * Returns the tg3_chip_reset() result. tp->lock must be held (see
 * comment above).
 */
4879 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4885 tg3_write_sig_pre_reset(tp, kind);
4887 tg3_abort_hw(tp, silent);
4888 err = tg3_chip_reset(tp);
4890 tg3_write_sig_legacy(tp, kind);
4891 tg3_write_sig_post_reset(tp, kind);
4899 #define TG3_FW_RELEASE_MAJOR 0x0
4900 #define TG3_FW_RELASE_MINOR 0x0
4901 #define TG3_FW_RELEASE_FIX 0x0
4902 #define TG3_FW_START_ADDR 0x08000000
4903 #define TG3_FW_TEXT_ADDR 0x08000000
4904 #define TG3_FW_TEXT_LEN 0x9c0
4905 #define TG3_FW_RODATA_ADDR 0x080009c0
4906 #define TG3_FW_RODATA_LEN 0x60
4907 #define TG3_FW_DATA_ADDR 0x08000a40
4908 #define TG3_FW_DATA_LEN 0x20
4909 #define TG3_FW_SBSS_ADDR 0x08000a60
4910 #define TG3_FW_SBSS_LEN 0xc
4911 #define TG3_FW_BSS_ADDR 0x08000a70
4912 #define TG3_FW_BSS_LEN 0x10
/* Opaque firmware image (.text section) loaded into the on-chip CPU by
 * tg3_load_firmware_cpu(); sized from TG3_FW_TEXT_LEN. Data blob — do not
 * hand-edit. Distribution permitted per the copyright notice at the top
 * of this file.
 */
4914 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4915 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4916 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4917 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4918 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4919 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4920 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4921 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4922 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4923 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4924 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4925 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4926 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4927 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4928 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4929 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4930 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4931 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4932 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4933 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4934 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4935 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4936 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4937 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4938 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4939 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4941 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4942 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4943 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4944 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4945 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4946 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4947 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4948 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4949 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4950 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4951 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4952 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4953 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4954 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4955 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4956 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4957 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4958 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4959 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4960 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4961 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4962 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4963 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4964 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4965 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4966 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4967 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4968 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4969 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4970 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4971 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4972 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4973 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4974 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4975 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4976 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4977 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4978 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4979 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4980 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4981 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4982 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4983 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4984 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4985 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4986 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4987 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4988 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4989 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4990 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4991 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4992 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4993 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4994 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4995 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4996 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4997 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4998 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4999 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5000 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5001 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5002 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5003 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5004 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5005 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
/* Firmware read-only data (.rodata section) companion to tg3FwText; sized
 * from TG3_FW_RODATA_LEN. Data blob — do not hand-edit.
 */
5008 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5009 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5010 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5011 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5012 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5016 #if 0 /* All zeros, don't eat up space with it. */
5017 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5018 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5019 0x00000000, 0x00000000, 0x00000000, 0x00000000
5023 #define RX_CPU_SCRATCH_BASE 0x30000
5024 #define RX_CPU_SCRATCH_SIZE 0x04000
5025 #define TX_CPU_SCRATCH_BASE 0x34000
5026 #define TX_CPU_SCRATCH_SIZE 0x04000
5028 /* tp->lock is held. */
/* Halt the on-chip RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE) so that firmware can be written into its scratch memory.
 * Caller holds tp->lock (see the callers below).  Polls up to 10000 times
 * for the CPU to report CPU_MODE_HALT.
 * NOTE(review): this excerpt elides interior lines (loop break, delay,
 * error/success returns, closing braces); comments describe only what is
 * visible here.
 */
5029 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* 5705-and-later chips have no separately haltable TX CPU — asking for it
 * is a driver bug, hence BUG_ON rather than an error return. */
5033 BUG_ON(offset == TX_CPU_BASE &&
5034 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5036 if (offset == RX_CPU_BASE) {
/* RX CPU: repeatedly clear the state register and assert HALT until the
 * mode register reflects the halted state. */
5037 for (i = 0; i < 10000; i++) {
5038 tw32(offset + CPU_STATE, 0xffffffff);
5039 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5040 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final posted (flushing) write of HALT after the poll loop — presumably
 * to guarantee the halt sticks on RX; verify against full source. */
5044 tw32(offset + CPU_STATE, 0xffffffff);
5045 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* TX CPU path (the `else` arm is elided here): same halt/poll sequence,
 * without the extra flushing write. */
5048 for (i = 0; i < 10000; i++) {
5049 tw32(offset + CPU_STATE, 0xffffffff);
5050 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5051 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Reached when the poll loop exhausted its iterations: report which CPU
 * failed to halt.  (The surrounding if/return lines are elided.) */
5057 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5060 (offset == RX_CPU_BASE ? "RX" : "TX"));
5064 /* Clear firmware's nvram arbitration. */
5065 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5066 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Fields of struct fw_info (the struct header and the u32 *text_data /
 * *rodata_data / *data_data pointer members are elided in this excerpt).
 * Describes one firmware image as three sections; the *_base values are
 * target addresses in the CPU's address space (only the low 16 bits are
 * used as a scratch-memory offset by tg3_load_firmware_cpu()), the *_len
 * values are section sizes in bytes. */
/* load address of the executable text section */
5071 unsigned int text_base;
/* byte length of the text section */
5072 unsigned int text_len;
/* load address of the read-only data section */
5074 unsigned int rodata_base;
/* byte length of the read-only data section */
5075 unsigned int rodata_len;
/* load address of the initialized data section */
5077 unsigned int data_base;
/* byte length of the data section */
5078 unsigned int data_len;
5082 /* tp->lock is held. */
/* Copy the firmware image described by @info into the scratch memory of
 * the CPU at @cpu_base, after halting that CPU.  Returns 0 on success,
 * negative errno on failure (the early-return lines are elided in this
 * excerpt).
 * NOTE(review): several interior lines (braces, `else`, error returns,
 * the per-word offset term of each write) are elided; comments describe
 * only what is visible here.
 */
5083 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5084 int cpu_scratch_size, struct fw_info *info)
5086 int err, lock_err, i;
5087 void (*write_op)(struct tg3 *, u32, u32);
/* 5705+ has no loadable TX CPU — reject the request (return elided). */
5089 if (cpu_base == TX_CPU_BASE &&
5090 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5091 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5092 "TX cpu firmware on %s which is 5705.\n",
/* Pick the scratch-memory write primitive for this chip generation. */
5097 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5098 write_op = tg3_write_mem;
5100 write_op = tg3_write_indirect_reg32;
5102 /* It is possible that bootcode is still loading at this point.
5103 * Get the nvram lock first before halting the cpu.
/* Take the NVRAM lock (best-effort: unlock below only if the lock call
 * succeeded — the `if (!lock_err)` guard is elided), then halt the CPU. */
5105 lock_err = tg3_nvram_lock(tp);
5106 err = tg3_halt_cpu(tp, cpu_base);
5108 tg3_nvram_unlock(tp);
/* Zero the whole scratch area before loading. */
5112 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5113 write_op(tp, cpu_scratch_base + i, 0);
/* Keep the CPU halted while writing (read-modify-write preserves other
 * mode bits). */
5114 tw32(cpu_base + CPU_STATE, 0xffffffff);
5115 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* Copy the three sections word by word; a NULL section pointer means
 * "all zeros" (see the `? :` fallbacks below).  Only the low 16 bits of
 * each section base are used as the scratch offset. */
5116 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5117 write_op(tp, (cpu_scratch_base +
5118 (info->text_base & 0xffff) +
5121 info->text_data[i] : 0));
5122 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5123 write_op(tp, (cpu_scratch_base +
5124 (info->rodata_base & 0xffff) +
5126 (info->rodata_data ?
5127 info->rodata_data[i] : 0));
5128 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5129 write_op(tp, (cpu_scratch_base +
5130 (info->data_base & 0xffff) +
5133 info->data_data[i] : 0));
5141 /* tp->lock is held. */
/* Work around a 5701 A0 erratum by loading the tg3Fw* firmware image into
 * BOTH the RX and TX CPU scratch areas, then starting only the RX CPU at
 * TG3_FW_TEXT_ADDR.  Returns 0 on success, negative errno on failure
 * (the error-return and udelay lines are elided in this excerpt).
 */
5142 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5144 struct fw_info info;
/* Describe the image: text and rodata come from the static arrays above;
 * the data section is all zeros (NULL pointer → zero-fill in
 * tg3_load_firmware_cpu()). */
5147 info.text_base = TG3_FW_TEXT_ADDR;
5148 info.text_len = TG3_FW_TEXT_LEN;
5149 info.text_data = &tg3FwText[0];
5150 info.rodata_base = TG3_FW_RODATA_ADDR;
5151 info.rodata_len = TG3_FW_RODATA_LEN;
5152 info.rodata_data = &tg3FwRodata[0];
5153 info.data_base = TG3_FW_DATA_ADDR;
5154 info.data_len = TG3_FW_DATA_LEN;
5155 info.data_data = NULL;
/* Same image goes into both CPUs' scratch memory (err checks elided). */
5157 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5158 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5163 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5164 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5169 /* Now startup only the RX cpu. */
5170 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5171 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* The PC write may not take on this silicon: retry up to 5 times,
 * re-halting the CPU and rewriting the PC each attempt (loop break and
 * delay lines elided). */
5173 for (i = 0; i < 5; i++) {
5174 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5176 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5177 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5178 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* PC never stuck after 5 attempts: report actual vs. expected PC. */
5182 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5183 "to set RX CPU PC, is %08x should be %08x\n",
5184 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Release the halt (mode 0) so the RX CPU starts executing the image. */
5188 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5189 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5194 #if TG3_TSO_SUPPORT != 0
5196 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5197 #define TG3_TSO_FW_RELASE_MINOR 0x6
5198 #define TG3_TSO_FW_RELEASE_FIX 0x0
5199 #define TG3_TSO_FW_START_ADDR 0x08000000
5200 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5201 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5202 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5203 #define TG3_TSO_FW_RODATA_LEN 0x60
5204 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5205 #define TG3_TSO_FW_DATA_LEN 0x30
5206 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5207 #define TG3_TSO_FW_SBSS_LEN 0x2c
5208 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5209 #define TG3_TSO_FW_BSS_LEN 0x894
5211 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5212 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5213 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5214 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5215 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5216 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5217 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5218 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5219 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5220 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5221 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5222 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5223 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5224 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5225 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5226 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5227 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5228 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5229 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5230 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5231 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5232 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5233 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5234 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5235 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5236 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5237 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5238 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5239 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5240 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5241 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5242 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5243 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5244 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5245 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5246 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5247 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5248 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5249 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5250 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5251 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5252 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5253 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5254 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5255 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5256 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5257 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5258 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5259 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5260 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5261 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5262 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5263 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5264 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5265 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5266 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5267 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5268 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5269 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5270 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5271 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5272 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5273 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5274 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5275 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5276 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5277 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5278 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5279 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5280 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5281 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5282 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5283 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5284 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5285 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5286 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5287 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5288 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5289 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5290 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5291 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5292 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5293 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5294 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5295 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5296 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5297 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5298 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5299 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5300 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5301 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5302 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5303 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5304 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5305 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5306 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5307 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5308 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5309 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5310 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5311 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5312 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5313 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5314 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5315 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5316 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5317 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5318 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5319 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5320 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5321 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5322 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5323 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5324 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5325 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5326 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5327 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5328 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5329 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5330 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5331 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5332 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5333 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5334 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5335 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5336 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5337 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5338 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5339 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5340 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5341 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5342 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5343 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5344 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5345 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5346 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5347 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5348 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5349 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5350 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5351 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5352 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5353 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5354 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5355 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5356 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5357 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5358 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5359 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5360 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5361 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5362 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5363 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5364 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5365 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5366 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5367 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5368 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5369 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5370 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5371 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5372 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5373 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5374 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5375 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5376 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5377 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5378 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5379 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5380 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5381 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5382 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5383 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5384 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5385 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5386 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5387 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5388 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5389 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5390 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5391 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5392 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5393 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5394 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5395 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5396 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5397 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5398 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5399 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5400 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5401 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5402 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5403 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5404 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5405 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5406 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5407 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5408 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5409 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5410 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5411 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5412 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5413 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5414 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5415 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5416 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5417 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5418 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5419 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5420 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5421 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5422 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5423 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5424 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5425 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5426 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5427 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5428 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5429 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5430 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5431 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5432 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5433 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5434 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5435 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5436 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5437 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5438 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5439 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5440 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5441 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5442 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5443 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5444 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5445 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5446 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5447 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5448 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5449 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5450 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5451 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5452 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5453 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5454 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5455 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5456 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5457 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5458 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5459 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5460 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5461 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5462 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5463 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5464 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5465 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5466 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5467 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5468 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5469 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5470 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5471 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5472 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5473 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5474 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5475 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5476 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5477 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5478 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5479 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5480 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5481 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5482 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5483 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5484 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5485 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5486 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5487 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5488 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5489 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5490 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5491 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5492 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5493 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5494 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5495 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
/* Read-only data section of the TSO firmware image (tg3TsoFwText above);
 * loaded at TG3_TSO_FW_RODATA_ADDR.  The words appear to decode as ASCII
 * strings (e.g. "MainCpuB", "stkoffld") — TODO confirm.
 * NOTE(review): the closing brace of this array is elided in this excerpt.
 */
5498 static u32 tg3TsoFwRodata[] = {
5499 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5500 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5501 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5502 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
/* Initialized data section of the TSO firmware image, loaded at
 * TG3_TSO_FW_DATA_ADDR.  The leading words appear to decode as the ASCII
 * version tag "stkoffld_v1.6.0" — TODO confirm (matches the
 * TG3_TSO_FW_RELEASE_MAJOR/MINOR macros above).
 * NOTE(review): the closing brace of this array is elided in this excerpt.
 */
5506 static u32 tg3TsoFwData[] = {
5507 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5508 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5512 /* 5705 needs a special version of the TSO firmware. */
5513 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5514 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5515 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5516 #define TG3_TSO5_FW_START_ADDR 0x00010000
5517 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5518 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5519 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5520 #define TG3_TSO5_FW_RODATA_LEN 0x50
5521 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5522 #define TG3_TSO5_FW_DATA_LEN 0x20
5523 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5524 #define TG3_TSO5_FW_SBSS_LEN 0x28
5525 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5526 #define TG3_TSO5_FW_BSS_LEN 0x88
5528 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5529 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5530 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5531 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5532 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5533 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5534 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5535 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5536 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5537 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5538 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5539 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5540 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5541 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5542 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5543 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5544 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5545 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5546 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5547 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5548 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5549 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5550 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5551 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5552 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5553 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5554 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5555 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5556 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5557 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5558 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5559 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5560 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5561 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5562 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5563 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5564 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5565 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5566 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5567 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5568 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5569 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5570 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5571 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5572 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5573 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5574 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5575 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5576 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5577 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5578 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5579 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5580 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5581 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5582 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5583 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5584 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5585 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5586 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5587 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5588 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5589 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5590 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5591 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5592 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5593 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5594 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5595 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5596 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5597 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5598 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5599 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5600 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5601 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5602 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5603 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5604 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5605 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5606 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5607 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5608 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5609 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5610 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5611 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5612 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5613 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5614 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5615 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5616 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5617 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5618 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5619 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5620 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5621 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5622 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5623 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5624 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5625 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5626 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5627 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5628 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5629 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5630 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5631 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5632 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5633 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5634 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5635 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5636 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5637 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5638 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5639 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5640 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5641 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5642 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5643 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5644 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5645 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5646 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5647 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5648 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5649 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5650 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5651 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5652 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5653 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5654 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5655 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5656 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5657 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5658 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5659 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5660 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5661 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5662 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5663 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5664 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5665 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5666 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5667 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5668 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5669 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5670 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5671 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5672 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5673 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5674 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5675 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5676 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5677 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5678 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5679 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5680 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5681 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5682 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5683 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5684 0x00000000, 0x00000000, 0x00000000,
/* Read-only data segment of the TSO offload firmware for 5705-class chips.
 * The words are packed ASCII tags used by the firmware itself, e.g.
 * 0x4d61696e = "Main", 0x43707542 = "CpuB", 0x73746b6f 0x66666c64 =
 * "stkoffld", 0x66617461 0x6c457272 = "fatalErr".  Generated data — do not
 * edit by hand.  Sized +1 word so whole-word copies by the loader are safe.
 * NOTE(review): the closing "};" is elided in this excerpt. */
5687 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5688 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5689 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5690 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5691 0x00000000, 0x00000000, 0x00000000,
/* Initialized data segment of the 5705 TSO firmware.  Words 1-4 spell the
 * ASCII version tag "stkoffld_v1.2.0" (0x73746b6f 0x66666c64 0x5f76312e
 * 0x322e3000).  Generated data — do not edit by hand.
 * NOTE(review): the closing "};" is elided in this excerpt. */
5694 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5695 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5696 0x00000000, 0x00000000, 0x00000000,
/* Load the TSO (TCP Segmentation Offload) firmware into the on-chip CPU
 * and start it executing.
 *
 * Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware and the
 * function bails out early.  For 5705-class ASICs the 5705-specific image
 * (tg3Tso5Fw*) is loaded into the RX CPU using part of the mbuf pool SRAM
 * as scratch space; all other TSO-capable chips load the generic image
 * into the TX CPU's dedicated scratch area.  After the copy, CPU_PC is
 * pointed at the firmware text base and polled (up to 5 tries) to confirm
 * the CPU took the new PC before it is released from halt.
 *
 * Returns 0 on success, negative errno on failure (caller holds tp->lock).
 * NOTE(review): several interior lines (braces, "int err, i;", returns,
 * the tail of the tg3_load_firmware_cpu() call) are elided in this excerpt.
 */
5699 /* tp->lock is held. */
5700 static int tg3_load_tso_firmware(struct tg3 *tp)
5702 struct fw_info info;
5703 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5706 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5710 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5711 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5712 info.text_data = &tg3Tso5FwText[0];
5713 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5714 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5715 info.rodata_data = &tg3Tso5FwRodata[0];
5716 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5717 info.data_len = TG3_TSO5_FW_DATA_LEN;
5718 info.data_data = &tg3Tso5FwData[0];
5719 cpu_base = RX_CPU_BASE;
/* 5705 has no TX-CPU scratch area; borrow the start of the mbuf pool. */
5720 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5721 cpu_scratch_size = (info.text_len +
5724 TG3_TSO5_FW_SBSS_LEN +
5725 TG3_TSO5_FW_BSS_LEN);
5727 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5728 info.text_len = TG3_TSO_FW_TEXT_LEN;
5729 info.text_data = &tg3TsoFwText[0];
5730 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5731 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5732 info.rodata_data = &tg3TsoFwRodata[0];
5733 info.data_base = TG3_TSO_FW_DATA_ADDR;
5734 info.data_len = TG3_TSO_FW_DATA_LEN;
5735 info.data_data = &tg3TsoFwData[0];
5736 cpu_base = TX_CPU_BASE;
5737 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5738 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5741 err = tg3_load_firmware_cpu(tp, cpu_base,
5742 cpu_scratch_base, cpu_scratch_size,
5747 /* Now startup the cpu. */
5748 tw32(cpu_base + CPU_STATE, 0xffffffff);
5749 tw32_f(cpu_base + CPU_PC, info.text_base);
/* Poll for the CPU to accept the new program counter; re-halt and
 * rewrite PC on each retry. */
5751 for (i = 0; i < 5; i++) {
5752 if (tr32(cpu_base + CPU_PC) == info.text_base)
5754 tw32(cpu_base + CPU_STATE, 0xffffffff);
5755 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5756 tw32_f(cpu_base + CPU_PC, info.text_base);
5760 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5761 "to set CPU PC, is %08x should be %08x\n",
5762 tp->dev->name, tr32(cpu_base + CPU_PC),
/* PC verified — clear CPU state and release the CPU from halt. */
5766 tw32(cpu_base + CPU_STATE, 0xffffffff);
5767 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5771 #endif /* TG3_TSO_SUPPORT != 0 */
/* Program the device's MAC address into the hardware.
 *
 * dev_addr bytes 0-1 go into the ADDR_HIGH register and bytes 2-5 into
 * ADDR_LOW; the same value is written to all 4 primary MAC_ADDR slots.
 * On 5703/5704 ASICs the 12 extended perfect-match slots are filled with
 * the same address as well.  Finally the sum of all six address bytes,
 * masked by TX_BACKOFF_SEED_MASK, seeds the transmit backoff generator so
 * different NICs pick different collision backoff timings.
 * NOTE(review): loop-variable declaration and closing braces are elided
 * in this excerpt.
 */
5773 /* tp->lock is held. */
5774 static void __tg3_set_mac_addr(struct tg3 *tp)
5776 u32 addr_high, addr_low;
5779 addr_high = ((tp->dev->dev_addr[0] << 8) |
5780 tp->dev->dev_addr[1]);
5781 addr_low = ((tp->dev->dev_addr[2] << 24) |
5782 (tp->dev->dev_addr[3] << 16) |
5783 (tp->dev->dev_addr[4] << 8) |
5784 (tp->dev->dev_addr[5] << 0));
5785 for (i = 0; i < 4; i++) {
5786 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5787 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5792 for (i = 0; i < 12; i++) {
5793 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5794 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* addr_high is reused here as the backoff seed accumulator. */
5798 addr_high = (tp->dev->dev_addr[0] +
5799 tp->dev->dev_addr[1] +
5800 tp->dev->dev_addr[2] +
5801 tp->dev->dev_addr[3] +
5802 tp->dev->dev_addr[4] +
5803 tp->dev->dev_addr[5]) &
5804 TX_BACKOFF_SEED_MASK;
5805 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* net_device ndo-style handler for changing the interface MAC address.
 *
 * Validates the new address, copies it into dev->dev_addr, and — if the
 * interface is running — pushes it to hardware.  When ASF management
 * firmware is active the whole chip is halted and restarted under the
 * full lock so ASF can re-register its own MAC filtering; otherwise a
 * cheap spin_lock_bh around __tg3_set_mac_addr() suffices.
 * NOTE(review): the early "return" statements and error-path lines are
 * elided in this excerpt.
 */
5808 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5810 struct tg3 *tp = netdev_priv(dev);
5811 struct sockaddr *addr = p;
5813 if (!is_valid_ether_addr(addr->sa_data))
5816 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* Interface down: nothing to program yet, dev_addr alone is enough. */
5818 if (!netif_running(dev))
5821 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5822 /* Reset chip so that ASF can re-init any MAC addresses it
5826 tg3_full_lock(tp, 1);
5828 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5831 tg3_netif_start(tp);
5832 tg3_full_unlock(tp);
5834 spin_lock_bh(&tp->lock);
5835 __tg3_set_mac_addr(tp);
5836 spin_unlock_bh(&tp->lock);
/* Write one TG3_BDINFO descriptor-ring control block into chip memory:
 * the 64-bit host DMA address (split into HIGH/LOW words), the
 * maxlen/flags word, and — on pre-5705 chips only — the ring's location
 * in NIC SRAM.
 * NOTE(review): the tg3_write_mem(tp, ...) call heads and the trailing
 * nic_addr parameter are elided in this excerpt; only the argument
 * continuation lines are visible.
 */
5842 /* tp->lock is held. */
5843 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5844 dma_addr_t mapping, u32 maxlen_flags,
5848 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5849 ((u64) mapping >> 32));
5851 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5852 ((u64) mapping & 0xffffffff));
5854 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5857 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5859 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5863 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from an ethtool_coalesce config:
 * rx/tx tick and max-frame thresholds always; the "during interrupt"
 * variants and the statistics-block tick only on pre-5705 chips (later
 * chips lack those registers).  The stats tick is forced while the link
 * carrier is down (forced value elided in this excerpt).
 * NOTE(review): interior lines (closing braces, the carrier-down
 * assignment) are elided here.
 */
5864 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5866 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5867 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5868 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5869 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5870 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5871 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5872 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5874 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5875 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5876 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5877 u32 val = ec->stats_block_coalesce_usecs;
5879 if (!netif_carrier_ok(tp->dev))
5882 tw32(HOSTCC_STAT_COAL_TICKS, val);
/* Full hardware (re)initialization after a chip reset.
 *
 * Brings the chip from post-reset state to fully operational: chip reset,
 * chipset workarounds, GRC mode, buffer-manager pools and watermarks,
 * receive/send descriptor-ring BDINFOs, MAC address, host coalescing,
 * the read/write DMA engines, the per-subsystem MODE enables, optional
 * firmware loads (5701_A0 fix, TSO), MAC TX/RX enable, PHY setup, and
 * finally the receive-rule table.  The register programming order is
 * hardware-mandated; do not reorder.
 *
 * Returns 0 on success, negative errno on failure (caller holds tp->lock).
 * NOTE(review): this excerpt is heavily elided — braces, "int i, err;",
 * udelay()s, returns, and many else-branches are missing between the
 * visible lines.
 */
5886 /* tp->lock is held. */
5887 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5889 u32 val, rdmac_mode;
5892 tg3_disable_ints(tp);
5896 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5898 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5899 tg3_abort_hw(tp, 1);
5902 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5905 err = tg3_chip_reset(tp);
5909 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5911 /* This works around an issue with Athlon chipsets on
5912 * B3 tigon3 silicon. This bit has no effect on any
5913 * other revision. But do not set this on PCI Express
5916 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5917 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5918 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5920 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5921 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5922 val = tr32(TG3PCI_PCISTATE);
5923 val |= PCISTATE_RETRY_SAME_DMA;
5924 tw32(TG3PCI_PCISTATE, val);
5927 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5928 /* Enable some hw fixes. */
5929 val = tr32(TG3PCI_MSI_DATA);
5930 val |= (1 << 26) | (1 << 28) | (1 << 29);
5931 tw32(TG3PCI_MSI_DATA, val);
5934 /* Descriptor ring init may make accesses to the
5935 * NIC SRAM area to setup the TX descriptors, so we
5936 * can only do this after the hardware has been
5937 * successfully reset.
5941 /* This value is determined during the probe time DMA
5942 * engine test, tg3_test_dma.
5944 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5946 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5947 GRC_MODE_4X_NIC_SEND_RINGS |
5948 GRC_MODE_NO_TX_PHDR_CSUM |
5949 GRC_MODE_NO_RX_PHDR_CSUM);
5950 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5952 /* Pseudo-header checksum is done by hardware logic and not
5953 * the offload processers, so make the chip do the pseudo-
5954 * header checksums on receive. For transmit it is more
5955 * convenient to do the pseudo-header checksum in software
5956 * as Linux does that on transmit for us in all cases.
5958 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5962 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5964 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5965 val = tr32(GRC_MISC_CFG);
5967 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5968 tw32(GRC_MISC_CFG, val);
5970 /* Initialize MBUF/DESC pool. */
5971 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5973 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5974 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5976 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5978 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5979 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5980 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5982 #if TG3_TSO_SUPPORT != 0
5983 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
/* 5705 TSO: carve the firmware footprint out of the front of the
 * mbuf pool (rounded up to a 128-byte boundary). */
5986 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5987 TG3_TSO5_FW_RODATA_LEN +
5988 TG3_TSO5_FW_DATA_LEN +
5989 TG3_TSO5_FW_SBSS_LEN +
5990 TG3_TSO5_FW_BSS_LEN);
5991 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5992 tw32(BUFMGR_MB_POOL_ADDR,
5993 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5994 tw32(BUFMGR_MB_POOL_SIZE,
5995 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Buffer-manager watermarks: standard vs jumbo MTU settings. */
5999 if (tp->dev->mtu <= ETH_DATA_LEN) {
6000 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6001 tp->bufmgr_config.mbuf_read_dma_low_water);
6002 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6003 tp->bufmgr_config.mbuf_mac_rx_low_water);
6004 tw32(BUFMGR_MB_HIGH_WATER,
6005 tp->bufmgr_config.mbuf_high_water);
6007 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6008 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6009 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6010 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6011 tw32(BUFMGR_MB_HIGH_WATER,
6012 tp->bufmgr_config.mbuf_high_water_jumbo);
6014 tw32(BUFMGR_DMA_LOW_WATER,
6015 tp->bufmgr_config.dma_low_water);
6016 tw32(BUFMGR_DMA_HIGH_WATER,
6017 tp->bufmgr_config.dma_high_water);
6019 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
/* Poll (bounded) for the buffer manager to come up. */
6020 for (i = 0; i < 2000; i++) {
6021 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6026 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6031 /* Setup replenish threshold. */
6032 val = tp->rx_pending / 8;
6035 else if (val > tp->rx_std_max_post)
6036 val = tp->rx_std_max_post;
6038 tw32(RCVBDI_STD_THRESH, val);
6040 /* Initialize TG3_BDINFO's at:
6041 * RCVDBDI_STD_BD: standard eth size rx ring
6042 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6043 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6046 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6047 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6048 * ring attribute flags
6049 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6051 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6052 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6054 * The size of each ring is fixed in the firmware, but the location is
6057 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6058 ((u64) tp->rx_std_mapping >> 32));
6059 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6060 ((u64) tp->rx_std_mapping & 0xffffffff));
6061 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6062 NIC_SRAM_RX_BUFFER_DESC);
6064 /* Don't even try to program the JUMBO/MINI buffer descriptor
6067 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6068 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6069 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6071 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6072 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6074 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6075 BDINFO_FLAGS_DISABLED);
6077 /* Setup replenish threshold. */
6078 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6080 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6081 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6082 ((u64) tp->rx_jumbo_mapping >> 32));
6083 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6084 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6085 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6086 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6087 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6088 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6090 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6091 BDINFO_FLAGS_DISABLED);
6096 /* There is only one send ring on 5705/5750, no need to explicitly
6097 * disable the others.
6099 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6100 /* Clear out send RCB ring in SRAM. */
6101 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6102 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6103 BDINFO_FLAGS_DISABLED);
6108 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6109 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6111 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6112 tp->tx_desc_mapping,
6113 (TG3_TX_RING_SIZE <<
6114 BDINFO_FLAGS_MAXLEN_SHIFT),
6115 NIC_SRAM_TX_BUFFER_DESC);
6117 /* There is only one receive return ring on 5705/5750, no need
6118 * to explicitly disable the others.
6120 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6121 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6122 i += TG3_BDINFO_SIZE) {
6123 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6124 BDINFO_FLAGS_DISABLED);
6129 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6131 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6133 (TG3_RX_RCB_RING_SIZE(tp) <<
6134 BDINFO_FLAGS_MAXLEN_SHIFT),
6137 tp->rx_std_ptr = tp->rx_pending;
6138 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6141 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6142 tp->rx_jumbo_pending : 0;
6143 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6146 /* Initialize MAC address and backoff seed. */
6147 __tg3_set_mac_addr(tp);
6149 /* MTU + ethernet header + FCS + optional VLAN tag */
6150 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6152 /* The slot time is changed by tg3_setup_phy if we
6153 * run at gigabit with half duplex.
6155 tw32(MAC_TX_LENGTHS,
6156 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6157 (6 << TX_LENGTHS_IPG_SHIFT) |
6158 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6160 /* Receive rules. */
6161 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6162 tw32(RCVLPC_CONFIG, 0x0181);
6164 /* Calculate RDMAC_MODE setting early, we need it to determine
6165 * the RCVLPC_STATE_ENABLE mask.
6167 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6168 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6169 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6170 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6171 RDMAC_MODE_LNGREAD_ENAB);
6172 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6173 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6175 /* If statement applies to 5705 and 5750 PCI devices only */
6176 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6177 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6178 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6179 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6180 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6181 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6182 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6183 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6184 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6185 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6189 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6190 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6192 #if TG3_TSO_SUPPORT != 0
6193 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6194 rdmac_mode |= (1 << 27);
6197 /* Receive/send statistics. */
6198 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6199 val = tr32(RCVLPC_STATS_ENABLE);
6200 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6201 tw32(RCVLPC_STATS_ENABLE, val);
6202 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6203 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6204 val = tr32(RCVLPC_STATS_ENABLE);
6205 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6206 tw32(RCVLPC_STATS_ENABLE, val);
6208 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6210 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6211 tw32(SNDDATAI_STATSENAB, 0xffffff);
6212 tw32(SNDDATAI_STATSCTRL,
6213 (SNDDATAI_SCTRL_ENABLE |
6214 SNDDATAI_SCTRL_FASTUPD));
6216 /* Setup host coalescing engine. */
6217 tw32(HOSTCC_MODE, 0);
6218 for (i = 0; i < 2000; i++) {
6219 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6224 __tg3_set_coalesce(tp, &tp->coal);
6226 /* set status block DMA address */
6227 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6228 ((u64) tp->status_mapping >> 32));
6229 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6230 ((u64) tp->status_mapping & 0xffffffff));
6232 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6233 /* Status/statistics block address. See tg3_timer,
6234 * the tg3_periodic_fetch_stats call there, and
6235 * tg3_get_stats to see how this works for 5705/5750 chips.
6237 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6238 ((u64) tp->stats_mapping >> 32));
6239 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6240 ((u64) tp->stats_mapping & 0xffffffff));
6241 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6242 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6245 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6247 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6248 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6249 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6250 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6252 /* Clear statistics/status block in chip, and status block in ram. */
6253 for (i = NIC_SRAM_STATS_BLK;
6254 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6256 tg3_write_mem(tp, i, 0);
6259 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6261 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6262 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6263 /* reset to prevent losing 1st rx packet intermittently */
6264 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6268 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6269 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6270 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6273 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6274 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6275 * register to preserve the GPIO settings for LOMs. The GPIOs,
6276 * whether used as inputs or outputs, are set by boot code after
6279 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6282 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6283 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6286 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6287 GRC_LCLCTRL_GPIO_OUTPUT3;
6289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6290 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6292 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6294 /* GPIO1 must be driven high for eeprom write protect */
6295 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6296 GRC_LCLCTRL_GPIO_OUTPUT1);
6298 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6301 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6304 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6305 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
/* Write-DMA engine configuration (mirrors the RDMAC setup above). */
6309 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6310 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6311 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6312 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6313 WDMAC_MODE_LNGREAD_ENAB);
6315 /* If statement applies to 5705 and 5750 PCI devices only */
6316 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6317 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6318 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6319 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6320 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6321 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6323 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6324 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6325 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6326 val |= WDMAC_MODE_RX_ACCEL;
6330 /* Enable host coalescing bug fix */
6331 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6335 tw32_f(WDMAC_MODE, val);
6338 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6339 val = tr32(TG3PCI_X_CAPS);
6340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6341 val &= ~PCIX_CAPS_BURST_MASK;
6342 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6343 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6344 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6345 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6346 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6347 val |= (tp->split_mode_max_reqs <<
6348 PCIX_CAPS_SPLIT_SHIFT);
6350 tw32(TG3PCI_X_CAPS, val);
6353 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining internal state machines. */
6356 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6357 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6358 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6359 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6360 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6361 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6362 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6363 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6364 #if TG3_TSO_SUPPORT != 0
6365 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6366 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6368 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6369 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6371 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6372 err = tg3_load_5701_a0_firmware_fix(tp);
6377 #if TG3_TSO_SUPPORT != 0
6378 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6379 err = tg3_load_tso_firmware(tp);
6385 tp->tx_mode = TX_MODE_ENABLE;
6386 tw32_f(MAC_TX_MODE, tp->tx_mode);
6389 tp->rx_mode = RX_MODE_ENABLE;
6390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6391 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6393 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Restore the link config that tg3_set_power_state saved when it put
 * the PHY into low-power mode. */
6396 if (tp->link_config.phy_is_low_power) {
6397 tp->link_config.phy_is_low_power = 0;
6398 tp->link_config.speed = tp->link_config.orig_speed;
6399 tp->link_config.duplex = tp->link_config.orig_duplex;
6400 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6403 tp->mi_mode = MAC_MI_MODE_BASE;
6404 tw32_f(MAC_MI_MODE, tp->mi_mode);
6407 tw32(MAC_LED_CTRL, tp->led_ctrl);
6409 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6410 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6411 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6414 tw32_f(MAC_RX_MODE, tp->rx_mode);
6417 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6419 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6420 /* Set drive transmission level to 1.2V */
6421 /* only if the signal pre-emphasis bit is not set */
6422 val = tr32(MAC_SERDES_CFG);
6425 tw32(MAC_SERDES_CFG, val);
6427 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6428 tw32(MAC_SERDES_CFG, 0x616000);
6431 /* Prevent chip from dropping frames when flow control
6434 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6437 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6438 /* Use hardware link auto-negotiation */
6439 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6442 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6443 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6446 tmp = tr32(SERDES_RX_CTRL);
6447 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6448 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6449 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6450 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6453 err = tg3_setup_phy(tp, reset_phy);
6457 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6460 /* Clear CRC stats. */
6461 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6462 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6463 tg3_readphy(tp, 0x14, &tmp);
6467 __tg3_set_rx_mode(tp->dev);
6469 /* Initialize receive rules. */
6470 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6471 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6472 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6473 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6475 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6476 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6480 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
/* Zero the unused receive rules; fall-through from the limit chosen
 * above clears only the rules this chip actually has. */
6484 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6486 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6488 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6490 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6492 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6494 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6496 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6498 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6500 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6502 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6504 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6506 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6508 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6510 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6518 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6523 /* Called at device open time to get the chip ready for
6524 * packet processing. Invoked with tp->lock held.
6526 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
/* NOTE(review): this excerpt elides interior lines; only the visible
 * subset of the body is shown below. */
6530 /* Force the chip into D0. */
6531 err = tg3_set_power_state(tp, PCI_D0);
6535 tg3_switch_clocks(tp);
/* Reset the PCI memory-window base before (re)programming the chip. */
6537 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Full hardware (re)initialization; reset_phy selects a PHY reset too. */
6539 err = tg3_reset_hw(tp, reset_phy);
/* Accumulate the 32-bit hardware counter at REG into the 64-bit software
 * counter PSTAT (a low/high pair), propagating a carry into .high when the
 * 32-bit .low addition wraps around. */
6545 #define TG3_STAT_ADD32(PSTAT, REG) \
6546 do { u32 __val = tr32(REG); \
6547 (PSTAT)->low += __val; \
6548 if ((PSTAT)->low < __val) \
6549 (PSTAT)->high += 1; \
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement counters into
 * the 64-bit software statistics mirror (tp->hw_stats). Skipped while the
 * link is down since the counters cannot advance then. */
6552 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6554 struct tg3_hw_stats *sp = tp->hw_stats;
6556 if (!netif_carrier_ok(tp->dev))
/* Transmit-side MAC counters. */
6559 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6560 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6561 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6562 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6563 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6564 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6565 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6566 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6567 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6568 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6569 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6570 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6571 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side MAC counters. */
6573 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6574 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6575 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6576 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6577 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6578 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6579 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6580 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6581 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6582 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6583 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6584 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6585 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6586 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Receive list placement (buffer-descriptor level) counters. */
6588 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6589 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6590 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Periodic driver timer. Works around the non-tagged IRQ status race,
 * polls link state once per second, fetches stats on 5705+ parts, and sends
 * the ASF firmware heartbeat every two seconds, then re-arms itself.
 * NOTE(review): interior lines are elided in this excerpt. */
6593 static void tg3_timer(unsigned long __opaque)
6595 struct tg3 *tp = (struct tg3 *) __opaque;
6600 spin_lock(&tp->lock);
6602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6603 /* All of this garbage is because when using non-tagged
6604 * IRQ status the mailbox/status_block protocol the chip
6605 * uses with the cpu is race prone.
6607 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6608 tw32(GRC_LOCAL_CTRL,
6609 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6611 tw32(HOSTCC_MODE, tp->coalesce_mode |
6612 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write DMA engine stopped unexpectedly: schedule a full chip reset. */
6615 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6616 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6617 spin_unlock(&tp->lock);
6618 schedule_work(&tp->reset_task);
6623 /* This part only runs once per second. */
6624 if (!--tp->timer_counter) {
6625 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6626 tg3_periodic_fetch_stats(tp);
6628 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6632 mac_stat = tr32(MAC_STATUS);
6635 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6636 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6638 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6642 tg3_setup_phy(tp, 0);
6643 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6644 u32 mac_stat = tr32(MAC_STATUS);
6647 if (netif_carrier_ok(tp->dev) &&
6648 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6651 if (! netif_carrier_ok(tp->dev) &&
6652 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6653 MAC_STATUS_SIGNAL_DET))) {
6659 ~MAC_MODE_PORT_MODE_MASK));
6661 tw32_f(MAC_MODE, tp->mac_mode);
6663 tg3_setup_phy(tp, 0);
6665 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6666 tg3_serdes_parallel_detect(tp);
6668 tp->timer_counter = tp->timer_multiplier;
6671 /* Heartbeat is only sent once every 2 seconds. */
6672 if (!--tp->asf_counter) {
6673 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6676 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6677 FWCMD_NICDRV_ALIVE2);
6678 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6679 /* 5 seconds timeout */
6680 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6681 val = tr32(GRC_RX_CPU_EVENT);
6683 tw32(GRC_RX_CPU_EVENT, val);
6685 tp->asf_counter = tp->asf_multiplier;
6688 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
6691 tp->timer.expires = jiffies + tp->timer_offset;
6692 add_timer(&tp->timer);
/* Select the appropriate interrupt handler and request flags based on the
 * interrupt mode in use (MSI, tagged status, or legacy INTx), then register
 * the handler for the device's IRQ. Returns request_irq()'s result.
 * NOTE(review): interior lines are elided in this excerpt, including the
 * handler assignments for the MSI and default cases. */
6695 static int tg3_request_irq(struct tg3 *tp)
6697 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6698 unsigned long flags;
6699 struct net_device *dev = tp->dev;
6701 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6703 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
/* MSI is exclusive to this device, so no SA_SHIRQ here. */
6705 flags = SA_SAMPLE_RANDOM;
6708 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6709 fn = tg3_interrupt_tagged;
6710 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6712 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
/* Verify that the chip can actually deliver an interrupt: swap in a test
 * ISR, force a coalescing-now event, and poll the interrupt mailbox for a
 * short while. Restores the normal ISR before returning.
 * NOTE(review): interior lines (polling loop body, return paths) are elided
 * in this excerpt. */
6715 static int tg3_test_interrupt(struct tg3 *tp)
6717 struct net_device *dev = tp->dev;
6721 if (!netif_running(dev))
6724 tg3_disable_ints(tp);
6726 free_irq(tp->pdev->irq, dev);
/* Temporarily install the test ISR. */
6728 err = request_irq(tp->pdev->irq, tg3_test_isr,
6729 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6733 tp->hw_status->status &= ~SD_STATUS_UPDATED;
6734 tg3_enable_ints(tp);
/* Force the host coalescing engine to fire an interrupt now. */
6736 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6739 for (i = 0; i < 5; i++) {
6740 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6747 tg3_disable_ints(tp);
6749 free_irq(tp->pdev->irq, dev);
/* Reinstall the normal production interrupt handler. */
6751 err = tg3_request_irq(tp);
6762 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6763 * successfully restored
6765 static int tg3_test_msi(struct tg3 *tp)
6767 struct net_device *dev = tp->dev;
/* Nothing to do unless MSI is actually in use. */
6771 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6774 /* Turn off SERR reporting in case MSI terminates with Master
6777 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6778 pci_write_config_word(tp->pdev, PCI_COMMAND,
6779 pci_cmd & ~PCI_COMMAND_SERR);
6781 err = tg3_test_interrupt(tp);
/* Restore the saved PCI command word (SERR bit included). */
6783 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6788 /* other failures */
6792 /* MSI test failed, go back to INTx mode */
6793 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6794 "switching to INTx mode. Please report this failure to "
6795 "the PCI maintainer and include system chipset information.\n",
6798 free_irq(tp->pdev->irq, dev);
6799 pci_disable_msi(tp->pdev);
6801 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Re-request the IRQ in INTx mode. */
6803 err = tg3_request_irq(tp);
6807 /* Need to reset the chip because the MSI cycle may have terminated
6808 * with Master Abort.
6810 tg3_full_lock(tp, 1);
6812 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6813 err = tg3_init_hw(tp, 1);
6815 tg3_full_unlock(tp);
6818 free_irq(tp->pdev->irq, dev);
/* net_device open hook: power the chip up, allocate DMA rings, optionally
 * enable MSI, request the IRQ, initialize the hardware, start the periodic
 * timer, run the MSI sanity test, and finally enable interrupts and the TX
 * queue. NOTE(review): error-handling and cleanup lines are partially
 * elided in this excerpt. */
6823 static int tg3_open(struct net_device *dev)
6825 struct tg3 *tp = netdev_priv(dev);
6828 tg3_full_lock(tp, 0);
6830 err = tg3_set_power_state(tp, PCI_D0);
6834 tg3_disable_ints(tp);
6835 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6837 tg3_full_unlock(tp);
6839 /* The placement of this call is tied
6840 * to the setup and use of Host TX descriptors.
6842 err = tg3_alloc_consistent(tp);
/* MSI is only attempted on 5750+ chips, excluding the 5750 AX/BX
 * revisions and single-port 5714s. */
6846 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6847 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6848 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6849 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6850 (tp->pdev_peer == tp->pdev))) {
6851 /* All MSI supporting chips should support tagged
6852 * status. Assert that this is the case.
6854 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6855 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6856 "Not using MSI.\n", tp->dev->name);
6857 } else if (pci_enable_msi(tp->pdev) == 0) {
6860 msi_mode = tr32(MSGINT_MODE);
6861 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6862 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6865 err = tg3_request_irq(tp);
/* IRQ request failed: unwind MSI and the DMA rings. */
6868 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6869 pci_disable_msi(tp->pdev);
6870 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6872 tg3_free_consistent(tp);
6876 tg3_full_lock(tp, 0);
6878 err = tg3_init_hw(tp, 1);
6880 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Timer cadence: 1 Hz with tagged status, 10 Hz otherwise. */
6883 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6884 tp->timer_offset = HZ;
6886 tp->timer_offset = HZ / 10;
6888 BUG_ON(tp->timer_offset > HZ);
6889 tp->timer_counter = tp->timer_multiplier =
6890 (HZ / tp->timer_offset);
6891 tp->asf_counter = tp->asf_multiplier =
6892 ((HZ / tp->timer_offset) * 2);
6894 init_timer(&tp->timer);
6895 tp->timer.expires = jiffies + tp->timer_offset;
6896 tp->timer.data = (unsigned long) tp;
6897 tp->timer.function = tg3_timer;
6900 tg3_full_unlock(tp);
6903 free_irq(tp->pdev->irq, dev);
6904 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6905 pci_disable_msi(tp->pdev);
6906 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6908 tg3_free_consistent(tp);
/* Confirm MSI delivery actually works; on failure fall back to INTx
 * (or tear everything down if even that fails). */
6912 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6913 err = tg3_test_msi(tp);
6916 tg3_full_lock(tp, 0);
6918 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6919 pci_disable_msi(tp->pdev);
6920 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6922 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6924 tg3_free_consistent(tp);
6926 tg3_full_unlock(tp);
6931 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6932 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
/* NOTE(review): undocumented register 0x7c04, bit 29 set for
 * one-shot MSI — meaning taken on faith from the vendor code. */
6933 u32 val = tr32(0x7c04);
6935 tw32(0x7c04, val | (1 << 29));
6940 tg3_full_lock(tp, 0);
6942 add_timer(&tp->timer);
6943 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6944 tg3_enable_ints(tp);
6946 tg3_full_unlock(tp);
6948 netif_start_queue(dev);
/* Debug-only helper: dump a broad snapshot of chip state to the kernel log —
 * PCI status, every major control block's MODE/STATUS registers, host
 * coalescing addresses, on-chip RCBs and status block, the software status
 * and statistics blocks, mailbox producer indices, and the first few NIC-side
 * TX/RX/jumbo descriptors. Read-only except for the register reads
 * themselves; intended to be called from error paths. */
6954 /*static*/ void tg3_dump_state(struct tg3 *tp)
6956 u32 val32, val32_2, val32_3, val32_4, val32_5;
6960 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6961 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6962 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6966 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6967 tr32(MAC_MODE), tr32(MAC_STATUS));
6968 printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6969 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6970 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6971 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6972 printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6973 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6975 /* Send data initiator control block */
6976 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6977 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6978 printk("       SNDDATAI_STATSCTRL[%08x]\n",
6979 tr32(SNDDATAI_STATSCTRL));
6981 /* Send data completion control block */
6982 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6984 /* Send BD ring selector block */
6985 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6986 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6988 /* Send BD initiator control block */
6989 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6990 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6992 /* Send BD completion control block */
6993 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6995 /* Receive list placement control block */
6996 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6997 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6998 printk("       RCVLPC_STATSCTRL[%08x]\n",
6999 tr32(RCVLPC_STATSCTRL));
7001 /* Receive data and receive BD initiator control block */
7002 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7003 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7005 /* Receive data completion control block */
7006 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7009 /* Receive BD initiator control block */
7010 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7011 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7013 /* Receive BD completion control block */
7014 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7015 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7017 /* Receive list selector control block */
7018 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7019 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7021 /* Mbuf cluster free block */
7022 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7023 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7025 /* Host coalescing control block */
7026 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7027 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7028 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7029 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7030 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7031 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7032 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7033 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7034 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7035 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7036 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7037 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7039 /* Memory arbiter control block */
7040 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7041 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7043 /* Buffer manager control block */
7044 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7045 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7046 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7047 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7048 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7049 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7050 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7051 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7053 /* Read DMA control block */
7054 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7055 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7057 /* Write DMA control block */
7058 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7059 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7061 /* DMA completion block */
7062 printk("DEBUG: DMAC_MODE[%08x]\n",
7066 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7067 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7068 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7069 tr32(GRC_LOCAL_CTRL));
/* On-chip receive buffer descriptor ring state. */
7072 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7073 tr32(RCVDBDI_JUMBO_BD + 0x0),
7074 tr32(RCVDBDI_JUMBO_BD + 0x4),
7075 tr32(RCVDBDI_JUMBO_BD + 0x8),
7076 tr32(RCVDBDI_JUMBO_BD + 0xc));
7077 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7078 tr32(RCVDBDI_STD_BD + 0x0),
7079 tr32(RCVDBDI_STD_BD + 0x4),
7080 tr32(RCVDBDI_STD_BD + 0x8),
7081 tr32(RCVDBDI_STD_BD + 0xc));
7082 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7083 tr32(RCVDBDI_MINI_BD + 0x0),
7084 tr32(RCVDBDI_MINI_BD + 0x4),
7085 tr32(RCVDBDI_MINI_BD + 0x8),
7086 tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks and status block in NIC SRAM. */
7088 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7089 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7090 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7091 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7092 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7093 val32, val32_2, val32_3, val32_4);
7095 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7096 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7097 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7098 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7099 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7100 val32, val32_2, val32_3, val32_4);
7102 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7103 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7104 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7105 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7106 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7107 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7108 val32, val32_2, val32_3, val32_4, val32_5);
7110 /* SW status block */
7111 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7112 tp->hw_status->status,
7113 tp->hw_status->status_tag,
7114 tp->hw_status->rx_jumbo_consumer,
7115 tp->hw_status->rx_consumer,
7116 tp->hw_status->rx_mini_consumer,
7117 tp->hw_status->idx[0].rx_producer,
7118 tp->hw_status->idx[0].tx_consumer);
7120 /* SW statistics block */
7121 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7122 ((u32 *)tp->hw_stats)[0],
7123 ((u32 *)tp->hw_stats)[1],
7124 ((u32 *)tp->hw_stats)[2],
7125 ((u32 *)tp->hw_stats)[3]);
/* Mailbox producer indices (host and NIC side). */
7128 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7129 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7130 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7131 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7132 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7134 /* NIC side send descriptors. */
7135 for (i = 0; i < 6; i++) {
7138 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7139 + (i * sizeof(struct tg3_tx_buffer_desc));
7140 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7142 readl(txd + 0x0), readl(txd + 0x4),
7143 readl(txd + 0x8), readl(txd + 0xc));
7146 /* NIC side RX descriptors. */
7147 for (i = 0; i < 6; i++) {
7150 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7151 + (i * sizeof(struct tg3_rx_buffer_desc));
7152 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7154 readl(rxd + 0x0), readl(rxd + 0x4),
7155 readl(rxd + 0x8), readl(rxd + 0xc));
7156 rxd += (4 * sizeof(u32));
7157 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7159 readl(rxd + 0x0), readl(rxd + 0x4),
7160 readl(rxd + 0x8), readl(rxd + 0xc));
/* NIC side jumbo RX descriptors. */
7163 for (i = 0; i < 6; i++) {
7166 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7167 + (i * sizeof(struct tg3_rx_buffer_desc));
7168 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7170 readl(rxd + 0x0), readl(rxd + 0x4),
7171 readl(rxd + 0x8), readl(rxd + 0xc));
7172 rxd += (4 * sizeof(u32));
7173 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7175 readl(rxd + 0x0), readl(rxd + 0x4),
7176 readl(rxd + 0x8), readl(rxd + 0xc));
7181 static struct net_device_stats *tg3_get_stats(struct net_device *);
7182 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop hook: wait out any in-flight reset task, stop the queue
 * and timer, halt the chip, release the IRQ/MSI, snapshot statistics into
 * the *_prev copies (so counters survive the ring teardown), free DMA
 * memory, and drop the chip into D3hot. */
7184 static int tg3_close(struct net_device *dev)
7186 struct tg3 *tp = netdev_priv(dev);
7188 /* Calling flush_scheduled_work() may deadlock because
7189 * linkwatch_event() may be on the workqueue and it will try to get
7190 * the rtnl_lock which we are holding.
7192 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7195 netif_stop_queue(dev);
7197 del_timer_sync(&tp->timer);
7199 tg3_full_lock(tp, 1);
7204 tg3_disable_ints(tp);
7206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7209 ~(TG3_FLAG_INIT_COMPLETE |
7210 TG3_FLAG_GOT_SERDES_FLOWCTL);
7212 tg3_full_unlock(tp);
7214 free_irq(tp->pdev->irq, dev);
7215 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7216 pci_disable_msi(tp->pdev);
7217 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve accumulated stats across the close so tg3_get_stats() and
 * tg3_get_estats() stay monotonic over open/close cycles. */
7220 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7221 sizeof(tp->net_stats_prev));
7222 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7223 sizeof(tp->estats_prev));
7225 tg3_free_consistent(tp);
7227 tg3_set_power_state(tp, PCI_D3hot);
7229 netif_carrier_off(tp->dev);
/* Combine a hardware stat's high/low 32-bit halves into a single value.
 * NOTE(review): the 32-bit (BITS_PER_LONG == 32) branch is elided in this
 * excerpt; the visible line builds the full 64-bit value. */
7234 static inline unsigned long get_stat64(tg3_stat64_t *val)
7238 #if (BITS_PER_LONG == 32)
7241 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count. On 5700/5701 copper parts the
 * count is read from the PHY (register 0x14 after unlocking via 0x1e |
 * 0x8000) and accumulated in tp->phy_crc_errors under tp->lock; all other
 * parts use the MAC's rx_fcs_errors hardware statistic. */
7246 static unsigned long calc_crc_errors(struct tg3 *tp)
7248 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7250 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7251 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7255 spin_lock_bh(&tp->lock);
7256 if (!tg3_readphy(tp, 0x1e, &val)) {
7257 tg3_writephy(tp, 0x1e, val | 0x8000);
7258 tg3_readphy(tp, 0x14, &val);
7261 spin_unlock_bh(&tp->lock);
7263 tp->phy_crc_errors += val;
7265 return tp->phy_crc_errors;
7268 return get_stat64(&hw_stats->rx_fcs_errors);
/* Helper for tg3_get_estats(): estats->member = the snapshot taken at the
 * last close (old_estats) plus the current hardware counter. Relies on
 * estats/old_estats/hw_stats being in scope at the expansion site. */
7271 #define ESTAT_ADD(member) \
7272 estats->member = old_estats->member + \
7273 get_stat64(&hw_stats->member)
/* Build the ethtool statistics block: for every member, add the hardware's
 * current counter to the value preserved at the last close (estats_prev),
 * so the totals are monotonic across open/close cycles. */
7275 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7277 struct tg3_ethtool_stats *estats = &tp->estats;
7278 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7279 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7284 ESTAT_ADD(rx_octets);
7285 ESTAT_ADD(rx_fragments);
7286 ESTAT_ADD(rx_ucast_packets);
7287 ESTAT_ADD(rx_mcast_packets);
7288 ESTAT_ADD(rx_bcast_packets);
7289 ESTAT_ADD(rx_fcs_errors);
7290 ESTAT_ADD(rx_align_errors);
7291 ESTAT_ADD(rx_xon_pause_rcvd);
7292 ESTAT_ADD(rx_xoff_pause_rcvd);
7293 ESTAT_ADD(rx_mac_ctrl_rcvd);
7294 ESTAT_ADD(rx_xoff_entered);
7295 ESTAT_ADD(rx_frame_too_long_errors);
7296 ESTAT_ADD(rx_jabbers);
7297 ESTAT_ADD(rx_undersize_packets);
7298 ESTAT_ADD(rx_in_length_errors);
7299 ESTAT_ADD(rx_out_length_errors);
7300 ESTAT_ADD(rx_64_or_less_octet_packets);
7301 ESTAT_ADD(rx_65_to_127_octet_packets);
7302 ESTAT_ADD(rx_128_to_255_octet_packets);
7303 ESTAT_ADD(rx_256_to_511_octet_packets);
7304 ESTAT_ADD(rx_512_to_1023_octet_packets);
7305 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7306 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7307 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7308 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7309 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7311 ESTAT_ADD(tx_octets);
7312 ESTAT_ADD(tx_collisions);
7313 ESTAT_ADD(tx_xon_sent);
7314 ESTAT_ADD(tx_xoff_sent);
7315 ESTAT_ADD(tx_flow_control);
7316 ESTAT_ADD(tx_mac_errors);
7317 ESTAT_ADD(tx_single_collisions);
7318 ESTAT_ADD(tx_mult_collisions);
7319 ESTAT_ADD(tx_deferred);
7320 ESTAT_ADD(tx_excessive_collisions);
7321 ESTAT_ADD(tx_late_collisions);
7322 ESTAT_ADD(tx_collide_2times);
7323 ESTAT_ADD(tx_collide_3times);
7324 ESTAT_ADD(tx_collide_4times);
7325 ESTAT_ADD(tx_collide_5times);
7326 ESTAT_ADD(tx_collide_6times);
7327 ESTAT_ADD(tx_collide_7times);
7328 ESTAT_ADD(tx_collide_8times);
7329 ESTAT_ADD(tx_collide_9times);
7330 ESTAT_ADD(tx_collide_10times);
7331 ESTAT_ADD(tx_collide_11times);
7332 ESTAT_ADD(tx_collide_12times);
7333 ESTAT_ADD(tx_collide_13times);
7334 ESTAT_ADD(tx_collide_14times);
7335 ESTAT_ADD(tx_collide_15times);
7336 ESTAT_ADD(tx_ucast_packets);
7337 ESTAT_ADD(tx_mcast_packets);
7338 ESTAT_ADD(tx_bcast_packets);
7339 ESTAT_ADD(tx_carrier_sense_errors);
7340 ESTAT_ADD(tx_discards);
7341 ESTAT_ADD(tx_errors);
7343 ESTAT_ADD(dma_writeq_full);
7344 ESTAT_ADD(dma_write_prioq_full);
7345 ESTAT_ADD(rxbds_empty);
7346 ESTAT_ADD(rx_discards);
7347 ESTAT_ADD(rx_errors);
7348 ESTAT_ADD(rx_threshold_hit);
7350 ESTAT_ADD(dma_readq_full);
7351 ESTAT_ADD(dma_read_prioq_full);
7352 ESTAT_ADD(tx_comp_queue_full);
7354 ESTAT_ADD(ring_set_send_prod_index);
7355 ESTAT_ADD(ring_status_update);
7356 ESTAT_ADD(nic_irqs);
7357 ESTAT_ADD(nic_avoided_irqs);
7358 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device get_stats hook: map the chip's hardware counters onto the
 * generic net_device_stats fields, adding the values preserved at the last
 * close (net_stats_prev) so the counters stay monotonic. */
7363 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7365 struct tg3 *tp = netdev_priv(dev);
7366 struct net_device_stats *stats = &tp->net_stats;
7367 struct net_device_stats *old_stats = &tp->net_stats_prev;
7368 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Packet counts: sum of ucast + mcast + bcast in each direction. */
7373 stats->rx_packets = old_stats->rx_packets +
7374 get_stat64(&hw_stats->rx_ucast_packets) +
7375 get_stat64(&hw_stats->rx_mcast_packets) +
7376 get_stat64(&hw_stats->rx_bcast_packets);
7378 stats->tx_packets = old_stats->tx_packets +
7379 get_stat64(&hw_stats->tx_ucast_packets) +
7380 get_stat64(&hw_stats->tx_mcast_packets) +
7381 get_stat64(&hw_stats->tx_bcast_packets);
7383 stats->rx_bytes = old_stats->rx_bytes +
7384 get_stat64(&hw_stats->rx_octets);
7385 stats->tx_bytes = old_stats->tx_bytes +
7386 get_stat64(&hw_stats->tx_octets);
7388 stats->rx_errors = old_stats->rx_errors +
7389 get_stat64(&hw_stats->rx_errors);
/* tx_errors aggregates several distinct hardware failure counters. */
7390 stats->tx_errors = old_stats->tx_errors +
7391 get_stat64(&hw_stats->tx_errors) +
7392 get_stat64(&hw_stats->tx_mac_errors) +
7393 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7394 get_stat64(&hw_stats->tx_discards);
7396 stats->multicast = old_stats->multicast +
7397 get_stat64(&hw_stats->rx_mcast_packets);
7398 stats->collisions = old_stats->collisions +
7399 get_stat64(&hw_stats->tx_collisions);
7401 stats->rx_length_errors = old_stats->rx_length_errors +
7402 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7403 get_stat64(&hw_stats->rx_undersize_packets);
7405 stats->rx_over_errors = old_stats->rx_over_errors +
7406 get_stat64(&hw_stats->rxbds_empty);
7407 stats->rx_frame_errors = old_stats->rx_frame_errors +
7408 get_stat64(&hw_stats->rx_align_errors);
7409 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7410 get_stat64(&hw_stats->tx_discards);
7411 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7412 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY on 5700/5701; see calc_crc_errors(). */
7414 stats->rx_crc_errors = old_stats->rx_crc_errors +
7415 calc_crc_errors(tp);
7417 stats->rx_missed_errors = old_stats->rx_missed_errors +
7418 get_stat64(&hw_stats->rx_discards);
/* Compute a 32-bit CRC over buf[0..len-1], processing each byte bit by bit.
 * Used below to hash multicast addresses into the MAC hash filter.
 * NOTE(review): the loop bodies and return are elided in this excerpt, so
 * the exact polynomial/initial value cannot be confirmed from here. */
7423 static inline u32 calc_crc(unsigned char *buf, int len)
7431 for (j = 0; j < len; j++) {
7434 for (k = 0; k < 8; k++) {
/* Program the four MAC hash-filter registers to either accept all
 * multicast frames (all bits set) or reject them all (all bits clear). */
7448 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7450 /* accept or reject all multicast frames */
7451 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7452 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7453 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7454 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute and program the RX filtering mode from dev->flags: promiscuous,
 * all-multicast, no-multicast, or a CRC-hashed multicast filter. VLAN tag
 * stripping is kept enabled whenever ASF firmware is active. Caller must
 * hold the appropriate tp locks. */
7457 static void __tg3_set_rx_mode(struct net_device *dev)
7459 struct tg3 *tp = netdev_priv(dev);
7462 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7463 RX_MODE_KEEP_VLAN_TAG);
7465 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7468 #if TG3_VLAN_TAG_USED
7470 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7471 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7473 /* By definition, VLAN is disabled always in this
7476 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7477 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7480 if (dev->flags & IFF_PROMISC) {
7481 /* Promiscuous mode. */
7482 rx_mode |= RX_MODE_PROMISC;
7483 } else if (dev->flags & IFF_ALLMULTI) {
7484 /* Accept all multicast. */
7485 tg3_set_multi (tp, 1);
7486 } else if (dev->mc_count < 1) {
7487 /* Reject all multicast. */
7488 tg3_set_multi (tp, 0);
7490 /* Accept one or more multicast(s). */
7491 struct dev_mc_list *mclist;
7493 u32 mc_filter[4] = { 0, };
/* Hash each multicast address into one bit of the 128-bit filter:
 * bits 5-6 of the CRC pick the register, low 5 bits pick the bit. */
7498 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7499 i++, mclist = mclist->next) {
7501 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7503 regidx = (bit & 0x60) >> 5;
7505 mc_filter[regidx] |= (1 << bit);
7508 tw32(MAC_HASH_REG_0, mc_filter[0]);
7509 tw32(MAC_HASH_REG_1, mc_filter[1]);
7510 tw32(MAC_HASH_REG_2, mc_filter[2]);
7511 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the RX mode register if something actually changed. */
7514 if (rx_mode != tp->rx_mode) {
7515 tp->rx_mode = rx_mode;
7516 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_rx_mode hook: take the tp locks and delegate to
 * __tg3_set_rx_mode(). No-op when the interface is down. */
7521 static void tg3_set_rx_mode(struct net_device *dev)
7523 struct tg3 *tp = netdev_priv(dev);
7525 if (!netif_running(dev))
7528 tg3_full_lock(tp, 0);
7529 __tg3_set_rx_mode(dev);
7530 tg3_full_unlock(tp);
/* Size of the ethtool register dump buffer produced by tg3_get_regs(). */
7533 #define TG3_REGDUMP_LEN (32 * 1024)
/* ethtool get_regs_len hook. */
7535 static int tg3_get_regs_len(struct net_device *dev)
7537 return TG3_REGDUMP_LEN;
/* ethtool get_regs hook: copy selected chip register ranges into the
 * caller-supplied TG3_REGDUMP_LEN buffer, placing each register at its own
 * chip offset (unread gaps stay zero from the memset). Skipped when the PHY
 * is in low-power state. */
7540 static void tg3_get_regs(struct net_device *dev,
7541 struct ethtool_regs *regs, void *_p)
7544 struct tg3 *tp = netdev_priv(dev);
7550 memset(p, 0, TG3_REGDUMP_LEN);
7552 if (tp->link_config.phy_is_low_power)
7555 tg3_full_lock(tp, 0);
/* GET_REG32_LOOP copies a contiguous register range starting at its
 * chip offset; GET_REG32_1 copies a single register. */
7557 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7558 #define GET_REG32_LOOP(base,len) \
7559 do { p = (u32 *)(orig_p + (base)); \
7560 for (i = 0; i < len; i += 4) \
7561 __GET_REG32((base) + i); \
7563 #define GET_REG32_1(reg) \
7564 do { p = (u32 *)(orig_p + (reg)); \
7565 __GET_REG32((reg)); \
7568 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7569 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7570 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7571 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7572 GET_REG32_1(SNDDATAC_MODE);
7573 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7574 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7575 GET_REG32_1(SNDBDC_MODE);
7576 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7577 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7578 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7579 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7580 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7581 GET_REG32_1(RCVDCC_MODE);
7582 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7583 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7584 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7585 GET_REG32_1(MBFREE_MODE);
7586 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7587 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7588 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7589 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7590 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7591 GET_REG32_1(RX_CPU_MODE);
7592 GET_REG32_1(RX_CPU_STATE);
7593 GET_REG32_1(RX_CPU_PGMCTR);
7594 GET_REG32_1(RX_CPU_HWBKPT);
7595 GET_REG32_1(TX_CPU_MODE);
7596 GET_REG32_1(TX_CPU_STATE);
7597 GET_REG32_1(TX_CPU_PGMCTR);
7598 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7599 GET_REG32_LOOP(FTQ_RESET, 0x120);
7600 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7601 GET_REG32_1(DMAC_MODE);
7602 GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers are only present/safe to read when NVRAM is in use. */
7603 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7604 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7607 #undef GET_REG32_LOOP
7610 tg3_full_unlock(tp);
/* ethtool get_eeprom_len hook: report the NVRAM size probed at init. */
7613 static int tg3_get_eeprom_len(struct net_device *dev)
7615 struct tg3 *tp = netdev_priv(dev);
7617 return tp->nvram_size;
7620 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7621 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
/* ethtool get_eeprom hook: read eeprom->len bytes from NVRAM starting at
 * eeprom->offset. NVRAM reads are 4-byte aligned, so an unaligned request
 * is handled in three phases: a leading partial word, the aligned middle,
 * and a trailing partial word. Words are byte-swapped via cpu_to_le32()
 * before the byte-granular memcpy into the output buffer. */
7623 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7625 struct tg3 *tp = netdev_priv(dev);
7628 u32 i, offset, len, val, b_offset, b_count;
7630 if (tp->link_config.phy_is_low_power)
7633 offset = eeprom->offset;
7637 eeprom->magic = TG3_EEPROM_MAGIC;
7640 /* adjustments to start on required 4 byte boundary */
7641 b_offset = offset & 3;
7642 b_count = 4 - b_offset;
7643 if (b_count > len) {
7644 /* i.e. offset=1 len=2 */
7647 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7650 val = cpu_to_le32(val);
7651 memcpy(data, ((char*)&val) + b_offset, b_count);
7654 eeprom->len += b_count;
7657 /* read bytes upto the last 4 byte boundary */
7658 pd = &data[eeprom->len];
7659 for (i = 0; i < (len - (len & 3)); i += 4) {
7660 ret = tg3_nvram_read(tp, offset + i, &val);
7665 val = cpu_to_le32(val);
7666 memcpy(pd + i, &val, 4);
7671 /* read last bytes not ending on 4 byte boundary */
7672 pd = &data[eeprom->len];
7674 b_offset = offset + len - b_count;
7675 ret = tg3_nvram_read(tp, b_offset, &val);
7678 val = cpu_to_le32(val);
7679 memcpy(pd, ((char*)&val), b_count);
7680 eeprom->len += b_count;
7685 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom hook: write eeprom->len bytes to NVRAM at
 * eeprom->offset. Because NVRAM writes are 4-byte aligned, an unaligned
 * start or length triggers a read-modify-write: the bordering words are
 * read first (start/end), a temporary word-aligned buffer is assembled
 * around the user data, and the whole padded range is written. Requires a
 * matching eeprom->magic and a powered-up PHY. */
7687 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7689 struct tg3 *tp = netdev_priv(dev);
7691 u32 offset, len, b_offset, odd_len, start, end;
7694 if (tp->link_config.phy_is_low_power)
7697 if (eeprom->magic != TG3_EEPROM_MAGIC)
7700 offset = eeprom->offset;
7703 if ((b_offset = (offset & 3))) {
7704 /* adjustments to start on required 4 byte boundary */
7705 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7708 start = cpu_to_le32(start);
7717 /* adjustments to end on required 4 byte boundary */
7719 len = (len + 3) & ~3;
7720 ret = tg3_nvram_read(tp, offset+len-4, &end);
7723 end = cpu_to_le32(end);
/* Assemble padded buffer: preserved start word + user data + preserved
 * end word. */
7727 if (b_offset || odd_len) {
7728 buf = kmalloc(len, GFP_KERNEL);
7732 memcpy(buf, &start, 4);
7734 memcpy(buf+len-4, &end, 4);
7735 memcpy(buf + b_offset, data, eeprom->len);
7738 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings: report supported link modes (keyed off the
 * 10/100-only and serdes flags), the advertised mask, and -- only while
 * the interface is running -- the currently active speed/duplex.
 */
7746 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7748 struct tg3 *tp = netdev_priv(dev);
7750 cmd->supported = (SUPPORTED_Autoneg);
/* Gigabit modes only on parts that are not 10/100-restricted. */
7752 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7753 cmd->supported |= (SUPPORTED_1000baseT_Half |
7754 SUPPORTED_1000baseT_Full);
/* Copper PHY: full 10/100 matrix, TP port; serdes: fibre port only. */
7756 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7757 cmd->supported |= (SUPPORTED_100baseT_Half |
7758 SUPPORTED_100baseT_Full |
7759 SUPPORTED_10baseT_Half |
7760 SUPPORTED_10baseT_Full |
7762 cmd->port = PORT_TP;
7764 cmd->supported |= SUPPORTED_FIBRE;
7765 cmd->port = PORT_FIBRE;
7768 cmd->advertising = tp->link_config.advertising;
7769 if (netif_running(dev)) {
7770 cmd->speed = tp->link_config.active_speed;
7771 cmd->duplex = tp->link_config.active_duplex;
7773 cmd->phy_address = PHY_ADDR;
7774 cmd->transceiver = 0;
7775 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings: validate the requested autoneg/speed/duplex and
 * apply it to link_config under tg3_full_lock, kicking the PHY if the
 * interface is up.  Serdes (fiber) parts accept only a restricted
 * advertisement mask and can only run at 1000 Mb/s; copper parts may
 * not *force* 1000 Mb/s (gigabit must be autonegotiated).
 * NOTE(review): the -EINVAL returns for the rejection branches are
 * among the lines dropped by this extract (numbering gaps).
 */
7781 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7783 struct tg3 *tp = netdev_priv(dev);
7785 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7786 /* These are the only valid advertisement bits allowed. */
7787 if (cmd->autoneg == AUTONEG_ENABLE &&
7788 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7789 ADVERTISED_1000baseT_Full |
7790 ADVERTISED_Autoneg |
7793 /* Fiber can only do SPEED_1000. */
7794 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7795 (cmd->speed != SPEED_1000))
7797 /* Copper cannot force SPEED_1000. */
7798 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7799 (cmd->speed == SPEED_1000))
7801 else if ((cmd->speed == SPEED_1000) &&
/* Bug fix: TG3_FLAG_10_100_ONLY is a tp->tg3_flags bit, not a
 * tg3_flags2 bit.  Testing it against tg3_flags2 matched an unrelated
 * TG3_FLG2_* bit, so 10/100-only chips were not reliably prevented
 * from being set to 1000 Mb/s. */
7802 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7805 tg3_full_lock(tp, 0);
7807 tp->link_config.autoneg = cmd->autoneg;
7808 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: record the advertised mask, invalidate forced values. */
7809 tp->link_config.advertising = cmd->advertising;
7810 tp->link_config.speed = SPEED_INVALID;
7811 tp->link_config.duplex = DUPLEX_INVALID;
/* Forced mode: clear advertising, pin speed/duplex. */
7813 tp->link_config.advertising = 0;
7814 tp->link_config.speed = cmd->speed;
7815 tp->link_config.duplex = cmd->duplex;
7818 if (netif_running(dev))
7819 tg3_setup_phy(tp, 1);
7821 tg3_full_unlock(tp);
/* ethtool get_drvinfo: fill in driver name/version, firmware version
 * and PCI bus address.
 * NOTE(review): strcpy into the fixed-size ethtool_drvinfo fields is
 * safe only while the sources are shorter than the fields; strlcpy
 * would be the defensive choice -- verify against field sizes.
 */
7826 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7828 struct tg3 *tp = netdev_priv(dev);
7830 strcpy(info->driver, DRV_MODULE_NAME);
7831 strcpy(info->version, DRV_MODULE_VERSION);
7832 strcpy(info->fw_version, tp->fw_ver);
7833 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol: the hardware supports only magic-packet wake-up;
 * report whether it is currently enabled.  No SecureOn password. */
7836 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7838 struct tg3 *tp = netdev_priv(dev);
7840 wol->supported = WAKE_MAGIC;
7842 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7843 wol->wolopts = WAKE_MAGIC;
7844 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: accept only WAKE_MAGIC (or nothing), and reject it
 * on serdes parts that lack WoL capability.  The flag update itself is
 * done under tp->lock. */
7847 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7849 struct tg3 *tp = netdev_priv(dev);
/* Any wake option other than magic packet is unsupported. */
7851 if (wol->wolopts & ~WAKE_MAGIC)
7853 if ((wol->wolopts & WAKE_MAGIC) &&
7854 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7855 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7858 spin_lock_bh(&tp->lock);
7859 if (wol->wolopts & WAKE_MAGIC)
7860 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7862 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7863 spin_unlock_bh(&tp->lock);
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
7868 static u32 tg3_get_msglevel(struct net_device *dev)
7870 struct tg3 *tp = netdev_priv(dev);
7871 return tp->msg_enable;
/* ethtool set_msglevel: store the new message-enable bitmask. */
7874 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7876 struct tg3 *tp = netdev_priv(dev);
7877 tp->msg_enable = value;
7880 #if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: only honoured on TSO-capable chips; otherwise the
 * request is rejected (the rejection branch body is dropped by this
 * extract -- see numbering gap before the ethtool_op call). */
7881 static int tg3_set_tso(struct net_device *dev, u32 value)
7883 struct tg3 *tp = netdev_priv(dev);
7885 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7890 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset: restart autonegotiation on the copper PHY.  Not
 * applicable on serdes parts or while the interface is down.  The
 * restart is issued only when autoneg is enabled (or parallel detect
 * is active). */
7894 static int tg3_nway_reset(struct net_device *dev)
7896 struct tg3 *tp = netdev_priv(dev);
7900 if (!netif_running(dev))
7903 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7906 spin_lock_bh(&tp->lock);
/* Dummy read first; the second read's value is the one acted upon.
 * NOTE(review): the double read mirrors upstream tg3 -- presumably to
 * flush stale latched PHY state; confirm before removing. */
7908 tg3_readphy(tp, MII_BMCR, &bmcr);
7909 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7910 ((bmcr & BMCR_ANENABLE) ||
7911 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7912 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7916 spin_unlock_bh(&tp->lock);
/* ethtool get_ringparam: report hardware ring limits and current
 * settings.  Jumbo values are only meaningful when the jumbo ring is
 * enabled; the mini ring is never used by this driver. */
7921 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7923 struct tg3 *tp = netdev_priv(dev);
7925 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7926 ering->rx_mini_max_pending = 0;
7927 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7928 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7930 ering->rx_jumbo_max_pending = 0;
7932 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7934 ering->rx_pending = tp->rx_pending;
7935 ering->rx_mini_pending = 0;
7936 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7937 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7939 ering->rx_jumbo_pending = 0;
7941 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam: validate requested ring sizes against the
 * hardware limits, then apply them.  A running interface is halted and
 * re-initialized so the new ring sizes take effect.
 * NOTE(review): the irq_sync setup and the re-init call between halt
 * and netif_start are among the lines dropped by this extract. */
7944 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7946 struct tg3 *tp = netdev_priv(dev);
7949 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7950 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7951 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7954 if (netif_running(dev)) {
7959 tg3_full_lock(tp, irq_sync);
7961 tp->rx_pending = ering->rx_pending;
/* Some chips (5788 class) cap the standard RX ring at 64 entries. */
7963 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7964 tp->rx_pending > 63)
7965 tp->rx_pending = 63;
7966 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7967 tp->tx_pending = ering->tx_pending;
7969 if (netif_running(dev)) {
7970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7972 tg3_netif_start(tp);
7975 tg3_full_unlock(tp);
/* ethtool get_pauseparam: translate the driver's pause flags into the
 * ethtool autoneg/rx/tx pause booleans. */
7980 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7982 struct tg3 *tp = netdev_priv(dev);
7984 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7985 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7986 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam: update the pause-autoneg / rx-pause /
 * tx-pause flags under tg3_full_lock, then halt and restart a running
 * interface so the new flow-control configuration is programmed.
 * NOTE(review): irq_sync setup and the re-init call are among the
 * lines dropped by this extract (numbering gaps). */
7989 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7991 struct tg3 *tp = netdev_priv(dev);
7994 if (netif_running(dev)) {
7999 tg3_full_lock(tp, irq_sync);
8001 if (epause->autoneg)
8002 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8004 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8005 if (epause->rx_pause)
8006 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8008 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8009 if (epause->tx_pause)
8010 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8012 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8014 if (netif_running(dev)) {
8015 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8017 tg3_netif_start(tp);
8020 tg3_full_unlock(tp);
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
8025 static u32 tg3_get_rx_csum(struct net_device *dev)
8027 struct tg3 *tp = netdev_priv(dev);
8028 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum: toggle RX checksum offload.  Chips with broken
 * checksum hardware cannot have it enabled (rejection body dropped by
 * this extract).  The flag flip is done under tp->lock. */
8031 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8033 struct tg3 *tp = netdev_priv(dev);
8035 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8041 spin_lock_bh(&tp->lock);
8043 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8045 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8046 spin_unlock_bh(&tp->lock);
/* ethtool set_tx_csum: toggle TX checksum offload.  Refused on chips
 * with broken checksum hardware; 5755/5787 use the hardware-checksum
 * ethtool helper, everything else the generic IP-checksum helper. */
8051 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8053 struct tg3 *tp = netdev_priv(dev);
8055 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8063 ethtool_op_set_tx_hw_csum(dev, data);
8065 ethtool_op_set_tx_csum(dev, data);
/* ethtool get_stats_count: number of u64 statistics reported. */
8070 static int tg3_get_stats_count (struct net_device *dev)
8072 return TG3_NUM_STATS;
/* ethtool self_test_count: number of self-test results reported. */
8075 static int tg3_get_test_count (struct net_device *dev)
8077 return TG3_NUM_TEST;
/* ethtool get_strings: copy the statistics or self-test key strings
 * into 'buf' according to the requested string set.
 * Fix: both memcpy source operands had been corrupted by a bad
 * HTML-entity conversion ("&e" -> the mojibake byte sequence "ð"),
 * which does not compile; restored "&ethtool_stats_keys" and
 * "&ethtool_test_keys" to match the sizeof operands on the same lines.
 */
8080 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8082 switch (stringset) {
8084 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8087 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8090 WARN_ON(1); /* we need a WARN() */
/* ethtool phys_id: blink the port LEDs to identify the adapter.
 * Alternates every 500 ms between "all LEDs forced on" and "all LEDs
 * forced off" for 'data' seconds, then restores the saved LED control
 * value.  Interruptible via msleep_interruptible(). */
8095 static int tg3_phys_id(struct net_device *dev, u32 data)
8097 struct tg3 *tp = netdev_priv(dev);
8100 if (!netif_running(tp->dev))
/* Two half-second phases per blink cycle. */
8106 for (i = 0; i < (data * 2); i++) {
8108 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8109 LED_CTRL_1000MBPS_ON |
8110 LED_CTRL_100MBPS_ON |
8111 LED_CTRL_10MBPS_ON |
8112 LED_CTRL_TRAFFIC_OVERRIDE |
8113 LED_CTRL_TRAFFIC_BLINK |
8114 LED_CTRL_TRAFFIC_LED);
8117 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8118 LED_CTRL_TRAFFIC_OVERRIDE);
8120 if (msleep_interruptible(500))
/* Restore normal LED behaviour. */
8123 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool get_ethtool_stats: snapshot the driver's estats block into
 * the caller-provided u64 array. */
8127 static void tg3_get_ethtool_stats (struct net_device *dev,
8128 struct ethtool_stats *estats, u64 *tmp_stats)
8130 struct tg3 *tp = netdev_priv(dev);
8131 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8134 #define NVRAM_TEST_SIZE 0x100
8135 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
/* Self-test: verify NVRAM contents.  Reads the magic word to pick the
 * image size, copies the image into a temporary buffer, then validates
 * either the selfboot byte checksum or the two CRC32 checksums of a
 * standard image (bootstrap block and manufacturing block).
 * NOTE(review): error returns, kfree and the final 'out' label are
 * among the lines dropped by this extract (numbering gaps). */
8137 static int tg3_test_nvram(struct tg3 *tp)
8139 u32 *buf, csum, magic;
8140 int i, j, err = 0, size;
8142 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8145 if (magic == TG3_EEPROM_MAGIC)
8146 size = NVRAM_TEST_SIZE;
/* 0xa5xxxxxx marks a selfboot image; bits 23:21 select its format. */
8147 else if ((magic & 0xff000000) == 0xa5000000) {
8148 if ((magic & 0xe00000) == 0x200000)
8149 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8155 buf = kmalloc(size, GFP_KERNEL);
8160 for (i = 0, j = 0; i < size; i += 4, j++) {
8163 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8165 buf[j] = cpu_to_le32(val);
8170 /* Selfboot format */
8171 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
/* Selfboot images carry a simple 8-bit byte checksum. */
8172 u8 *buf8 = (u8 *) buf, csum8 = 0;
8174 for (i = 0; i < size; i++)
8186 /* Bootstrap checksum at offset 0x10 */
8187 csum = calc_crc((unsigned char *) buf, 0x10);
8188 if(csum != cpu_to_le32(buf[0x10/4]))
8191 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8192 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8193 if (csum != cpu_to_le32(buf[0xfc/4]))
8203 #define TG3_SERDES_TIMEOUT_SEC 2
8204 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: wait up to a media-dependent number of seconds for link
 * (serdes links settle faster than copper autoneg).  Polls carrier
 * once per second; interruptible. */
8206 static int tg3_test_link(struct tg3 *tp)
8210 if (!netif_running(tp->dev))
8213 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8214 max = TG3_SERDES_TIMEOUT_SEC;
8216 max = TG3_COPPER_TIMEOUT_SEC;
8218 for (i = 0; i < max; i++) {
8219 if (netif_carrier_ok(tp->dev))
8222 if (msleep_interruptible(1000))
8229 /* Only test the commonly used registers */
/* Self-test: register read/write test.  For each table entry the
 * original value is saved, zeros and then (read_mask|write_mask) are
 * written, and the read-back is checked: read-only bits must be
 * unchanged, read/write bits must follow the written value.  Entries
 * are filtered by chip class via the TG3_FL_* flags; a 0xffff offset
 * terminates the table.  The table's register set and masks are
 * hardware-defined -- left untouched. */
8230 static int tg3_test_registers(struct tg3 *tp)
8233 u32 offset, read_mask, write_mask, val, save_val, read_val;
8237 #define TG3_FL_5705 0x1
8238 #define TG3_FL_NOT_5705 0x2
8239 #define TG3_FL_NOT_5788 0x4
8243 /* MAC Control Registers */
8244 { MAC_MODE, TG3_FL_NOT_5705,
8245 0x00000000, 0x00ef6f8c },
8246 { MAC_MODE, TG3_FL_5705,
8247 0x00000000, 0x01ef6b8c },
8248 { MAC_STATUS, TG3_FL_NOT_5705,
8249 0x03800107, 0x00000000 },
8250 { MAC_STATUS, TG3_FL_5705,
8251 0x03800100, 0x00000000 },
8252 { MAC_ADDR_0_HIGH, 0x0000,
8253 0x00000000, 0x0000ffff },
8254 { MAC_ADDR_0_LOW, 0x0000,
8255 0x00000000, 0xffffffff },
8256 { MAC_RX_MTU_SIZE, 0x0000,
8257 0x00000000, 0x0000ffff },
8258 { MAC_TX_MODE, 0x0000,
8259 0x00000000, 0x00000070 },
8260 { MAC_TX_LENGTHS, 0x0000,
8261 0x00000000, 0x00003fff },
8262 { MAC_RX_MODE, TG3_FL_NOT_5705,
8263 0x00000000, 0x000007fc },
8264 { MAC_RX_MODE, TG3_FL_5705,
8265 0x00000000, 0x000007dc },
8266 { MAC_HASH_REG_0, 0x0000,
8267 0x00000000, 0xffffffff },
8268 { MAC_HASH_REG_1, 0x0000,
8269 0x00000000, 0xffffffff },
8270 { MAC_HASH_REG_2, 0x0000,
8271 0x00000000, 0xffffffff },
8272 { MAC_HASH_REG_3, 0x0000,
8273 0x00000000, 0xffffffff },
8275 /* Receive Data and Receive BD Initiator Control Registers. */
8276 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8277 0x00000000, 0xffffffff },
8278 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8279 0x00000000, 0xffffffff },
8280 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8281 0x00000000, 0x00000003 },
8282 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8283 0x00000000, 0xffffffff },
8284 { RCVDBDI_STD_BD+0, 0x0000,
8285 0x00000000, 0xffffffff },
8286 { RCVDBDI_STD_BD+4, 0x0000,
8287 0x00000000, 0xffffffff },
8288 { RCVDBDI_STD_BD+8, 0x0000,
8289 0x00000000, 0xffff0002 },
8290 { RCVDBDI_STD_BD+0xc, 0x0000,
8291 0x00000000, 0xffffffff },
8293 /* Receive BD Initiator Control Registers. */
8294 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8295 0x00000000, 0xffffffff },
8296 { RCVBDI_STD_THRESH, TG3_FL_5705,
8297 0x00000000, 0x000003ff },
8298 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8299 0x00000000, 0xffffffff },
8301 /* Host Coalescing Control Registers. */
8302 { HOSTCC_MODE, TG3_FL_NOT_5705,
8303 0x00000000, 0x00000004 },
8304 { HOSTCC_MODE, TG3_FL_5705,
8305 0x00000000, 0x000000f6 },
8306 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8307 0x00000000, 0xffffffff },
8308 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8309 0x00000000, 0x000003ff },
8310 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8311 0x00000000, 0xffffffff },
8312 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8313 0x00000000, 0x000003ff },
8314 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8315 0x00000000, 0xffffffff },
8316 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8317 0x00000000, 0x000000ff },
8318 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8319 0x00000000, 0xffffffff },
8320 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8321 0x00000000, 0x000000ff },
8322 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8323 0x00000000, 0xffffffff },
8324 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8325 0x00000000, 0xffffffff },
8326 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8327 0x00000000, 0xffffffff },
8328 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8329 0x00000000, 0x000000ff },
8330 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8331 0x00000000, 0xffffffff },
8332 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8333 0x00000000, 0x000000ff },
8334 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8335 0x00000000, 0xffffffff },
8336 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8337 0x00000000, 0xffffffff },
8338 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8339 0x00000000, 0xffffffff },
8340 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8341 0x00000000, 0xffffffff },
8342 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8343 0x00000000, 0xffffffff },
8344 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8345 0xffffffff, 0x00000000 },
8346 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8347 0xffffffff, 0x00000000 },
8349 /* Buffer Manager Control Registers. */
8350 { BUFMGR_MB_POOL_ADDR, 0x0000,
8351 0x00000000, 0x007fff80 },
8352 { BUFMGR_MB_POOL_SIZE, 0x0000,
8353 0x00000000, 0x007fffff },
8354 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8355 0x00000000, 0x0000003f },
8356 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8357 0x00000000, 0x000001ff },
8358 { BUFMGR_MB_HIGH_WATER, 0x0000,
8359 0x00000000, 0x000001ff },
8360 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8361 0xffffffff, 0x00000000 },
8362 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8363 0xffffffff, 0x00000000 },
8365 /* Mailbox Registers */
8366 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8367 0x00000000, 0x000001ff },
8368 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8369 0x00000000, 0x000001ff },
8370 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8371 0x00000000, 0x000007ff },
8372 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8373 0x00000000, 0x000001ff },
/* Sentinel: terminates the table. */
8375 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8378 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8383 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
/* Skip entries that do not apply to this chip class. */
8384 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8387 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8390 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8391 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8394 offset = (u32) reg_tbl[i].offset;
8395 read_mask = reg_tbl[i].read_mask;
8396 write_mask = reg_tbl[i].write_mask;
8398 /* Save the original register content */
8399 save_val = tr32(offset);
8401 /* Determine the read-only value. */
8402 read_val = save_val & read_mask;
8404 /* Write zero to the register, then make sure the read-only bits
8405 * are not changed and the read/write bits are all zeros.
8411 /* Test the read-only and read/write bits. */
8412 if (((val & read_mask) != read_val) || (val & write_mask))
8415 /* Write ones to all the bits defined by RdMask and WrMask, then
8416 * make sure the read-only bits are not changed and the
8417 * read/write bits are all ones.
8419 tw32(offset, read_mask | write_mask);
8423 /* Test the read-only bits. */
8424 if ((val & read_mask) != read_val)
8427 /* Test the read/write bits. */
8428 if ((val & write_mask) != write_mask)
/* Restore the register before moving on. */
8431 tw32(offset, save_val);
/* Failure path: report the offending offset and restore the value. */
8437 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8438 tw32(offset, save_val);
/* Self-test helper: write each test pattern across 'len' bytes of NIC
 * memory at 'offset' (one 32-bit word at a time) and read it back,
 * failing on the first mismatch.  The failure return is among the
 * lines dropped by this extract. */
8442 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8444 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8448 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8449 for (j = 0; j < len; j += 4) {
8452 tg3_write_mem(tp, offset + j, test_pattern[i]);
8453 tg3_read_mem(tp, offset + j, &val);
8454 if (val != test_pattern[i])
/* Self-test: run tg3_do_mem_test() over every internal memory region of
 * the chip.  The region table (offset, length) differs per chip family:
 * 570x, 5705-class, or 5755/5787; a 0xffffffff offset terminates it. */
8461 static int tg3_test_memory(struct tg3 *tp)
8463 static struct mem_entry {
8466 } mem_tbl_570x[] = {
8467 { 0x00000000, 0x00b50},
8468 { 0x00002000, 0x1c000},
8469 { 0xffffffff, 0x00000}
8470 }, mem_tbl_5705[] = {
8471 { 0x00000100, 0x0000c},
8472 { 0x00000200, 0x00008},
8473 { 0x00004000, 0x00800},
8474 { 0x00006000, 0x01000},
8475 { 0x00008000, 0x02000},
8476 { 0x00010000, 0x0e000},
8477 { 0xffffffff, 0x00000}
8478 }, mem_tbl_5755[] = {
8479 { 0x00000200, 0x00008},
8480 { 0x00004000, 0x00800},
8481 { 0x00006000, 0x00800},
8482 { 0x00008000, 0x02000},
8483 { 0x00010000, 0x0c000},
8484 { 0xffffffff, 0x00000}
8486 struct mem_entry *mem_tbl;
/* Pick the region table matching this chip generation. */
8490 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8493 mem_tbl = mem_tbl_5755;
8495 mem_tbl = mem_tbl_5705;
8497 mem_tbl = mem_tbl_570x;
8499 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8500 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8501 mem_tbl[i].len)) != 0)
8508 #define TG3_MAC_LOOPBACK 0
8509 #define TG3_PHY_LOOPBACK 1
/* Self-test: send one frame through an internal loopback (MAC or PHY
 * mode) and verify it arrives intact on the RX ring.  Builds a frame
 * addressed to ourselves with a counting byte pattern, posts it on the
 * TX ring, polls for TX-complete and RX-produce, then compares the
 * received payload byte-for-byte.
 * NOTE(review): several lines (returns, tx_len setup, mdelay, skb free)
 * are dropped by this extract -- see numbering gaps. */
8511 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8513 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8515 struct sk_buff *skb, *rx_skb;
8518 int num_pkts, tx_len, rx_len, i, err;
8519 struct tg3_rx_buffer_desc *desc;
8521 if (loopback_mode == TG3_MAC_LOOPBACK) {
8522 /* HW errata - mac loopback fails in some cases on 5780.
8523 * Normal traffic and PHY loopback are not affected by
8526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
/* Internal MAC loopback: loop frames inside the MAC in GMII mode. */
8529 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8530 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8531 MAC_MODE_PORT_MODE_GMII;
8532 tw32(MAC_MODE, mac_mode);
8533 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
/* PHY loopback: force the PHY into 1000/full loopback via BMCR. */
8534 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8537 /* reset to prevent losing 1st rx packet intermittently */
8538 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8539 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8541 tw32_f(MAC_RX_MODE, tp->rx_mode);
8543 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8544 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8545 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8546 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8547 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8548 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8550 tw32(MAC_MODE, mac_mode);
8558 skb = dev_alloc_skb(tx_len);
/* Frame: our own MAC as destination, zero type field, then a
 * counting pattern so corruption is detectable at any offset. */
8562 tx_data = skb_put(skb, tx_len);
8563 memcpy(tx_data, tp->dev->dev_addr, 6);
8564 memset(tx_data + 6, 0x0, 8);
8566 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8568 for (i = 14; i < tx_len; i++)
8569 tx_data[i] = (u8) (i & 0xff);
8571 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8573 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Remember the RX producer index so we can detect the new packet. */
8578 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8582 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8587 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8589 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
/* Poll (bounded) for the TX consumer and RX producer to advance. */
8593 for (i = 0; i < 10; i++) {
8594 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8599 tx_idx = tp->hw_status->idx[0].tx_consumer;
8600 rx_idx = tp->hw_status->idx[0].rx_producer;
8601 if ((tx_idx == tp->tx_prod) &&
8602 (rx_idx == (rx_start_idx + num_pkts)))
8606 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8609 if (tx_idx != tp->tx_prod)
8612 if (rx_idx != rx_start_idx + num_pkts)
/* Validate the RX descriptor: standard ring, no errors, right size. */
8615 desc = &tp->rx_rcb[rx_start_idx];
8616 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8617 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8618 if (opaque_key != RXD_OPAQUE_RING_STD)
8621 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8622 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8625 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8626 if (rx_len != tx_len)
8629 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8631 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8632 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
/* Byte-compare the payload against the counting pattern we sent. */
8634 for (i = 14; i < tx_len; i++) {
8635 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8640 /* tg3_free_rings will unmap and free the rx_skb */
8645 #define TG3_MAC_LOOPBACK_FAILED 1
8646 #define TG3_PHY_LOOPBACK_FAILED 2
8647 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8648 TG3_PHY_LOOPBACK_FAILED)
/* Self-test: run MAC loopback always, and PHY loopback on non-serdes
 * parts, after a fresh hardware init.  Returns a bitmask of
 * TG3_*_LOOPBACK_FAILED flags (0 on full success). */
8650 static int tg3_test_loopback(struct tg3 *tp)
8654 if (!netif_running(tp->dev))
8655 return TG3_LOOPBACK_FAILED;
8657 tg3_reset_hw(tp, 1);
8659 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8660 err |= TG3_MAC_LOOPBACK_FAILED;
8661 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8662 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8663 err |= TG3_PHY_LOOPBACK_FAILED;
/* ethtool self_test entry point.  Online tests (NVRAM, link) always
 * run.  Offline tests (registers, memory, loopback, interrupt) require
 * halting the chip: the device is stopped under tg3_full_lock, the
 * RX/TX firmware CPUs are halted, each test result is recorded in
 * data[] and ETH_TEST_FL_FAILED accumulated, then the chip is reset
 * and restarted.  Power is raised to D0 for the tests and restored to
 * D3hot afterwards if the device was in low-power mode.
 * NOTE(review): the data[] assignments for several tests and the
 * re-init call are among the lines dropped by this extract. */
8669 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8672 struct tg3 *tp = netdev_priv(dev);
8674 if (tp->link_config.phy_is_low_power)
8675 tg3_set_power_state(tp, PCI_D0);
8677 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8679 if (tg3_test_nvram(tp) != 0) {
8680 etest->flags |= ETH_TEST_FL_FAILED;
8683 if (tg3_test_link(tp) != 0) {
8684 etest->flags |= ETH_TEST_FL_FAILED;
8687 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8688 int err, irq_sync = 0;
8690 if (netif_running(dev)) {
8695 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip and its on-board firmware CPUs before poking
 * registers and memory directly. */
8697 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8698 err = tg3_nvram_lock(tp);
8699 tg3_halt_cpu(tp, RX_CPU_BASE);
8700 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8701 tg3_halt_cpu(tp, TX_CPU_BASE);
8703 tg3_nvram_unlock(tp);
8705 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8708 if (tg3_test_registers(tp) != 0) {
8709 etest->flags |= ETH_TEST_FL_FAILED;
8712 if (tg3_test_memory(tp) != 0) {
8713 etest->flags |= ETH_TEST_FL_FAILED;
8716 if ((data[4] = tg3_test_loopback(tp)) != 0)
8717 etest->flags |= ETH_TEST_FL_FAILED;
/* Interrupt test must run unlocked (it waits for an IRQ). */
8719 tg3_full_unlock(tp);
8721 if (tg3_test_interrupt(tp) != 0) {
8722 etest->flags |= ETH_TEST_FL_FAILED;
8726 tg3_full_lock(tp, 0);
8728 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8729 if (netif_running(dev)) {
8730 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8732 tg3_netif_start(tp);
8735 tg3_full_unlock(tp);
8737 if (tp->link_config.phy_is_low_power)
8738 tg3_set_power_state(tp, PCI_D3hot);
/* ioctl handler: implements the MII ioctls (SIOCGMIIPHY/REG, SIOCSMIIREG
 * presumably -- the case labels are dropped by this extract).  PHY
 * register access is refused on serdes parts and while powered down;
 * writes additionally require CAP_NET_ADMIN.  All PHY access is done
 * under tp->lock. */
8742 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8744 struct mii_ioctl_data *data = if_mii(ifr);
8745 struct tg3 *tp = netdev_priv(dev);
8750 data->phy_id = PHY_ADDR;
8756 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8757 break; /* We have no PHY */
8759 if (tp->link_config.phy_is_low_power)
8762 spin_lock_bh(&tp->lock);
8763 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8764 spin_unlock_bh(&tp->lock);
8766 data->val_out = mii_regval;
8772 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8773 break; /* We have no PHY */
8775 if (!capable(CAP_NET_ADMIN))
8778 if (tp->link_config.phy_is_low_power)
8781 spin_lock_bh(&tp->lock);
8782 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8783 spin_unlock_bh(&tp->lock);
8794 #if TG3_VLAN_TAG_USED
/* VLAN: record the new vlan_group and reprogram the RX mode so the
 * chip keeps/strips VLAN tags appropriately.  A running interface is
 * quiesced around the change (the netif stop call is dropped by this
 * extract). */
8795 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8797 struct tg3 *tp = netdev_priv(dev);
8799 if (netif_running(dev))
8802 tg3_full_lock(tp, 0);
8806 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8807 __tg3_set_rx_mode(dev);
8809 tg3_full_unlock(tp);
8811 if (netif_running(dev))
8812 tg3_netif_start(tp);
/* VLAN: drop the per-VID device pointer from the vlan_group, with the
 * interface quiesced around the update. */
8815 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8817 struct tg3 *tp = netdev_priv(dev);
8819 if (netif_running(dev))
8822 tg3_full_lock(tp, 0);
8824 tp->vlgrp->vlan_devices[vid] = NULL;
8825 tg3_full_unlock(tp);
8827 if (netif_running(dev))
8828 tg3_netif_start(tp);
/* ethtool get_coalesce: copy the driver's cached coalescing config. */
8832 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8834 struct tg3 *tp = netdev_priv(dev);
8836 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool set_coalesce: range-check the requested interrupt coalescing
 * parameters (the "irq" tick and stats limits are only non-zero on
 * pre-5705 chips, effectively forcing those fields to 0 on 5705+),
 * reject configs that would generate no RX or no TX interrupts, copy
 * the supported fields into tp->coal, and program the hardware if the
 * interface is up. */
8840 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8842 struct tg3 *tp = netdev_priv(dev);
8843 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8844 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8846 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8847 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8848 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8849 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8850 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8853 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8854 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8855 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8856 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8857 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8858 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8859 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8860 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8861 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8862 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8865 /* No rx interrupts will be generated if both are zero */
8866 if ((ec->rx_coalesce_usecs == 0) &&
8867 (ec->rx_max_coalesced_frames == 0))
8870 /* No tx interrupts will be generated if both are zero */
8871 if ((ec->tx_coalesce_usecs == 0) &&
8872 (ec->tx_max_coalesced_frames == 0))
8875 /* Only copy relevant parameters, ignore all others. */
8876 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8877 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8878 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8879 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8880 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8881 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8882 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8883 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8884 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8886 if (netif_running(dev)) {
8887 tg3_full_lock(tp, 0);
8888 __tg3_set_coalesce(tp, &tp->coal);
8889 tg3_full_unlock(tp);
/* ethtool operations table wiring the handlers above into the ethtool
 * core; the TSO ops are compiled in only when TG3_TSO_SUPPORT is set. */
8894 static struct ethtool_ops tg3_ethtool_ops = {
8895 .get_settings = tg3_get_settings,
8896 .set_settings = tg3_set_settings,
8897 .get_drvinfo = tg3_get_drvinfo,
8898 .get_regs_len = tg3_get_regs_len,
8899 .get_regs = tg3_get_regs,
8900 .get_wol = tg3_get_wol,
8901 .set_wol = tg3_set_wol,
8902 .get_msglevel = tg3_get_msglevel,
8903 .set_msglevel = tg3_set_msglevel,
8904 .nway_reset = tg3_nway_reset,
8905 .get_link = ethtool_op_get_link,
8906 .get_eeprom_len = tg3_get_eeprom_len,
8907 .get_eeprom = tg3_get_eeprom,
8908 .set_eeprom = tg3_set_eeprom,
8909 .get_ringparam = tg3_get_ringparam,
8910 .set_ringparam = tg3_set_ringparam,
8911 .get_pauseparam = tg3_get_pauseparam,
8912 .set_pauseparam = tg3_set_pauseparam,
8913 .get_rx_csum = tg3_get_rx_csum,
8914 .set_rx_csum = tg3_set_rx_csum,
8915 .get_tx_csum = ethtool_op_get_tx_csum,
8916 .set_tx_csum = tg3_set_tx_csum,
8917 .get_sg = ethtool_op_get_sg,
8918 .set_sg = ethtool_op_set_sg,
8919 #if TG3_TSO_SUPPORT != 0
8920 .get_tso = ethtool_op_get_tso,
8921 .set_tso = tg3_set_tso,
8923 .self_test_count = tg3_get_test_count,
8924 .self_test = tg3_self_test,
8925 .get_strings = tg3_get_strings,
8926 .phys_id = tg3_phys_id,
8927 .get_stats_count = tg3_get_stats_count,
8928 .get_ethtool_stats = tg3_get_ethtool_stats,
8929 .get_coalesce = tg3_get_coalesce,
8930 .set_coalesce = tg3_set_coalesce,
8931 .get_perm_addr = ethtool_op_get_perm_addr,
/* Probe time: determine the size of a plain EEPROM part.  Starts from
 * the default EEPROM_CHIP_SIZE and reads at increasing offsets until
 * the address wraps back to the validation signature, which marks the
 * actual chip size.  Bails out early (keeping the default) if the
 * magic word cannot be read or is not a recognized format. */
8934 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8936 u32 cursize, val, magic;
8938 tp->nvram_size = EEPROM_CHIP_SIZE;
8940 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8943 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8947 * Size the chip by reading offsets at increasing powers of two.
8948 * When we encounter our validation signature, we know the addressing
8949 * has wrapped around, and thus have our chip size.
8953 while (cursize < tp->nvram_size) {
8954 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8963 tp->nvram_size = cursize;
/* Probe time: determine NVRAM size.  Non-standard (selfboot) images
 * fall back to the EEPROM sizing probe; standard images store their
 * size in KB in the upper half of the word at offset 0xf0, with a
 * 128 KB default when that word is unusable. */
8966 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8970 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8973 /* Selfboot format */
8974 if (val != TG3_EEPROM_MAGIC) {
8975 tg3_get_eeprom_size(tp);
8979 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8981 tp->nvram_size = (val >> 16) * 1024;
8985 tp->nvram_size = 0x20000;
/* Probe time: decode NVRAM_CFG1 for pre-5752 chips.  Notes whether a
 * flash interface is present, then (on 5750/5780-class parts) maps the
 * strapped vendor code to the JEDEC id, page size and buffered-ness of
 * the attached part; other chips default to Atmel buffered flash. */
8988 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8992 nvcfg1 = tr32(NVRAM_CFG1);
8993 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8994 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* No flash interface: disable the EEPROM-compat bypass path. */
8997 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8998 tw32(NVRAM_CFG1, nvcfg1);
9001 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9002 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9003 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9004 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9005 tp->nvram_jedecnum = JEDEC_ATMEL;
9006 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9007 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9009 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9010 tp->nvram_jedecnum = JEDEC_ATMEL;
9011 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9013 case FLASH_VENDOR_ATMEL_EEPROM:
9014 tp->nvram_jedecnum = JEDEC_ATMEL;
9015 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9016 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9018 case FLASH_VENDOR_ST:
9019 tp->nvram_jedecnum = JEDEC_ST;
9020 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9021 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9023 case FLASH_VENDOR_SAIFUN:
9024 tp->nvram_jedecnum = JEDEC_SAIFUN;
9025 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9027 case FLASH_VENDOR_SST_SMALL:
9028 case FLASH_VENDOR_SST_LARGE:
9029 tp->nvram_jedecnum = JEDEC_SST;
9030 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Non-5750/5780 chips: assume Atmel buffered flash. */
9035 tp->nvram_jedecnum = JEDEC_ATMEL;
9036 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9037 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* Probe time: decode NVRAM_CFG1 for 5752 parts.  Bit 27 marks NVRAM
 * reserved for the TPM; the vendor field selects the JEDEC id and
 * flash/EEPROM type; for flash parts the page size comes from the
 * config register, for EEPROMs it is pinned to the maximum chip size. */
9041 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9045 nvcfg1 = tr32(NVRAM_CFG1);
9047 /* NVRAM protection for TPM */
9048 if (nvcfg1 & (1 << 27))
9049 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9051 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9052 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9053 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9054 tp->nvram_jedecnum = JEDEC_ATMEL;
9055 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9057 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9058 tp->nvram_jedecnum = JEDEC_ATMEL;
9059 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9060 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9062 case FLASH_5752VENDOR_ST_M45PE10:
9063 case FLASH_5752VENDOR_ST_M45PE20:
9064 case FLASH_5752VENDOR_ST_M45PE40:
9065 tp->nvram_jedecnum = JEDEC_ST;
9066 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9067 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9071 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
/* Flash part: page size is encoded in NVRAM_CFG1. */
9072 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9073 case FLASH_5752PAGE_SIZE_256:
9074 tp->nvram_pagesize = 256;
9076 case FLASH_5752PAGE_SIZE_512:
9077 tp->nvram_pagesize = 512;
9079 case FLASH_5752PAGE_SIZE_1K:
9080 tp->nvram_pagesize = 1024;
9082 case FLASH_5752PAGE_SIZE_2K:
9083 tp->nvram_pagesize = 2048;
9085 case FLASH_5752PAGE_SIZE_4K:
9086 tp->nvram_pagesize = 4096;
9088 case FLASH_5752PAGE_SIZE_264:
9089 tp->nvram_pagesize = 264;
9094 /* For eeprom, set pagesize to maximum eeprom size */
9095 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9097 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9098 tw32(NVRAM_CFG1, nvcfg1);
/*
 * tg3_get_5755_nvram_info - decode NVRAM_CFG1 for 5755-class chips.
 * Same scheme as the 5752 variant but with the 5755 vendor codes and
 * fixed page sizes per part family.
 * NOTE(review): listing is elided (breaks/braces missing).
 */
9102 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9106 nvcfg1 = tr32(NVRAM_CFG1);
9108 /* NVRAM protection for TPM */
9109 if (nvcfg1 & (1 << 27))
9110 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9112 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9113 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9114 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9115 tp->nvram_jedecnum = JEDEC_ATMEL;
9116 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* EEPROM: use whole-chip size as the "page" for write chunking. */
9117 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9119 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9120 tw32(NVRAM_CFG1, nvcfg1);
9122 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9123 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9124 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9125 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9126 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9127 tp->nvram_jedecnum = JEDEC_ATMEL;
9128 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9129 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* Atmel AT45-style flash uses 264-byte pages. */
9130 tp->nvram_pagesize = 264;
9132 case FLASH_5752VENDOR_ST_M45PE10:
9133 case FLASH_5752VENDOR_ST_M45PE20:
9134 case FLASH_5752VENDOR_ST_M45PE40:
9135 tp->nvram_jedecnum = JEDEC_ST;
9136 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9137 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9138 tp->nvram_pagesize = 256;
/*
 * tg3_get_5787_nvram_info - decode NVRAM_CFG1 for 5787-class chips.
 * Like the 5755 variant; adds Micro(chip) EEPROM vendor codes and has
 * no TPM-protection check in the visible lines.
 * NOTE(review): listing is elided (breaks/braces missing).
 */
9143 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9147 nvcfg1 = tr32(NVRAM_CFG1);
9149 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9150 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9151 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9152 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9153 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9154 tp->nvram_jedecnum = JEDEC_ATMEL;
9155 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9156 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9158 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9159 tw32(NVRAM_CFG1, nvcfg1);
9161 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9162 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9163 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9164 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9165 tp->nvram_jedecnum = JEDEC_ATMEL;
9166 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9167 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9168 tp->nvram_pagesize = 264;
9170 case FLASH_5752VENDOR_ST_M45PE10:
9171 case FLASH_5752VENDOR_ST_M45PE20:
9172 case FLASH_5752VENDOR_ST_M45PE40:
9173 tp->nvram_jedecnum = JEDEC_ST;
9174 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9175 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9176 tp->nvram_pagesize = 256;
9181 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9182 static void __devinit tg3_nvram_init(struct tg3 *tp)
9186 tw32_f(GRC_EEPROM_ADDR,
9187 (EEPROM_ADDR_FSM_RESET |
9188 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9189 EEPROM_ADDR_CLKPERD_SHIFT)));
9191 /* XXX schedule_timeout() ... */
9192 for (j = 0; j < 100; j++)
9195 /* Enable seeprom accesses. */
9196 tw32_f(GRC_LOCAL_CTRL,
9197 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9200 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9201 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9202 tp->tg3_flags |= TG3_FLAG_NVRAM;
9204 if (tg3_nvram_lock(tp)) {
9205 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9206 "tg3_nvram_init failed.\n", tp->dev->name);
9209 tg3_enable_nvram_access(tp);
9211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9212 tg3_get_5752_nvram_info(tp);
9213 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9214 tg3_get_5755_nvram_info(tp);
9215 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9216 tg3_get_5787_nvram_info(tp);
9218 tg3_get_nvram_info(tp);
9220 tg3_get_nvram_size(tp);
9222 tg3_disable_nvram_access(tp);
9223 tg3_nvram_unlock(tp);
9226 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9228 tg3_get_eeprom_size(tp);
/*
 * tg3_nvram_read_using_eeprom - read one 32-bit word via the legacy
 * EEPROM interface (GRC_EEPROM_ADDR/GRC_EEPROM_DATA), used when the
 * chip has no NVRAM block.  Kicks off a read and polls up to 10000
 * times for EEPROM_ADDR_COMPLETE.
 * NOTE(review): listing is elided — the early-return for a bad offset,
 * the poll delay and the final return values are not visible here.
 */
9232 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9233 u32 offset, u32 *val)
9238 if (offset > EEPROM_ADDR_ADDR_MASK ||
/* Preserve non-address bits of the EEPROM address register. */
9242 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9243 EEPROM_ADDR_DEVID_MASK |
9245 tw32(GRC_EEPROM_ADDR,
9247 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9248 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9249 EEPROM_ADDR_ADDR_MASK) |
9250 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Busy-wait for the read to complete. */
9252 for (i = 0; i < 10000; i++) {
9253 tmp = tr32(GRC_EEPROM_ADDR);
9255 if (tmp & EEPROM_ADDR_COMPLETE)
9259 if (!(tmp & EEPROM_ADDR_COMPLETE))
9262 *val = tr32(GRC_EEPROM_DATA);
/* Max poll iterations waiting for an NVRAM command to finish. */
9266 #define NVRAM_CMD_TIMEOUT 10000
/*
 * tg3_nvram_exec_cmd - issue a command to the NVRAM controller and
 * poll NVRAM_CMD until NVRAM_CMD_DONE or timeout.
 * NOTE(review): listing is elided — the per-iteration delay, loop
 * break and the return statements are not visible here.
 */
9268 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9272 tw32(NVRAM_CMD, nvram_cmd);
9273 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9275 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
/* Loop index reaching the limit means the command timed out. */
9280 if (i == NVRAM_CMD_TIMEOUT) {
/*
 * tg3_nvram_phys_addr - translate a logical NVRAM offset into the
 * physical address expected by Atmel AT45DB-style buffered flash,
 * whose pages are addressed as (page << PAGE_POS) + byte-in-page.
 * Other NVRAM types use the address unchanged.
 */
9286 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9288 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9289 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9290 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9291 (tp->nvram_jedecnum == JEDEC_ATMEL))
9293 addr = ((addr / tp->nvram_pagesize) <<
9294 ATMEL_AT45DB0X1B_PAGE_POS) +
9295 (addr % tp->nvram_pagesize);
/*
 * tg3_nvram_logical_addr - inverse of tg3_nvram_phys_addr(): map an
 * Atmel AT45DB physical (page,offset) address back to a flat logical
 * byte offset.  Other NVRAM types use the address unchanged.
 */
9300 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9302 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9303 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9304 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9305 (tp->nvram_jedecnum == JEDEC_ATMEL))
9307 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9308 tp->nvram_pagesize) +
9309 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
/*
 * tg3_nvram_read - read one 32-bit word from NVRAM at @offset into
 * @val.  Falls back to the legacy EEPROM interface when the chip has
 * no NVRAM block; otherwise translates the address, takes the NVRAM
 * lock, executes a read command and byte-swaps the result.
 * NOTE(review): listing is elided — bounds/lock error returns and the
 * final return of the command status are not visible here.
 */
9314 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9318 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9319 return tg3_nvram_read_using_eeprom(tp, offset, val);
9321 offset = tg3_nvram_phys_addr(tp, offset);
9323 if (offset > NVRAM_ADDR_MSK)
9326 ret = tg3_nvram_lock(tp);
9330 tg3_enable_nvram_access(tp);
9332 tw32(NVRAM_ADDR, offset);
9333 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9334 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
/* Data register is big-endian on the wire; swab to host order. */
9337 *val = swab32(tr32(NVRAM_RDDATA));
9339 tg3_disable_nvram_access(tp);
9341 tg3_nvram_unlock(tp);
/*
 * tg3_nvram_read_swab - like tg3_nvram_read() but returns the word
 * byte-swapped relative to it.
 * NOTE(review): listing is elided — the swab of @tmp into *val and the
 * return of @err are not visible here; confirm against full source.
 */
9346 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9351 err = tg3_nvram_read(tp, offset, &tmp);
/*
 * tg3_nvram_write_block_using_eeprom - write @len bytes from @buf to
 * the legacy EEPROM, one 32-bit word at a time: load the data
 * register, clear the COMPLETE flag, program address + WRITE + START,
 * then poll up to 10000 times for completion.
 * NOTE(review): listing is elided — the per-word address computation,
 * poll delay and return paths are not visible here.
 */
9356 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9357 u32 offset, u32 len, u8 *buf)
9362 for (i = 0; i < len; i += 4) {
9367 memcpy(&data, buf + i, 4);
9369 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9371 val = tr32(GRC_EEPROM_ADDR);
/* Writing COMPLETE back clears the sticky status bit. */
9372 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9374 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9376 tw32(GRC_EEPROM_ADDR, val |
9377 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9378 (addr & EEPROM_ADDR_ADDR_MASK) |
9382 for (j = 0; j < 10000; j++) {
9383 val = tr32(GRC_EEPROM_ADDR);
9385 if (val & EEPROM_ADDR_COMPLETE)
9389 if (!(val & EEPROM_ADDR_COMPLETE)) {
9398 /* offset and length are dword aligned */
/*
 * tg3_nvram_write_block_unbuffered - read/modify/write for unbuffered
 * flash: for each page touched, read the whole page into a kmalloc'd
 * bounce buffer, merge in the caller's data, erase the page, then
 * write it back word by word (FIRST on the first word, LAST on the
 * final word), finishing with a write-disable command.
 * NOTE(review): listing is elided — loop bounds, error returns and the
 * kfree of the bounce buffer are not visible here.
 */
9399 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9403 u32 pagesize = tp->nvram_pagesize;
9404 u32 pagemask = pagesize - 1;
/* Bounce buffer holds one full flash page for read-modify-write. */
9408 tmp = kmalloc(pagesize, GFP_KERNEL);
9414 u32 phy_addr, page_off, size;
9416 phy_addr = offset & ~pagemask;
/* Read the current contents of the page. */
9418 for (j = 0; j < pagesize; j += 4) {
9419 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9420 (u32 *) (tmp + j))))
9426 page_off = offset & pagemask;
9433 memcpy(tmp + page_off, buf, size);
9435 offset = offset + (pagesize - page_off);
9437 tg3_enable_nvram_access(tp);
9440 * Before we can erase the flash page, we need
9441 * to issue a special "write enable" command.
9443 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9445 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9448 /* Erase the target page */
9449 tw32(NVRAM_ADDR, phy_addr);
9451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9452 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9454 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9457 /* Issue another write enable to start the write. */
9458 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9460 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Write the merged page back, one dword at a time. */
9463 for (j = 0; j < pagesize; j += 4) {
9466 data = *((u32 *) (tmp + j));
9467 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9469 tw32(NVRAM_ADDR, phy_addr + j);
9471 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9475 nvram_cmd |= NVRAM_CMD_FIRST;
9476 else if (j == (pagesize - 4))
9477 nvram_cmd |= NVRAM_CMD_LAST;
9479 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Write-disable once the whole block has been programmed. */
9486 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9487 tg3_nvram_exec_cmd(tp, nvram_cmd);
9494 /* offset and length are dword aligned */
/*
 * tg3_nvram_write_block_buffered - write @len bytes from @buf to
 * buffered flash or EEPROM, one dword per NVRAM command.  FIRST/LAST
 * flags mark page boundaries; older ST parts additionally need a
 * write-enable command before each page.
 * NOTE(review): listing is elided — the end-of-buffer LAST condition
 * and the final return are not visible here.
 */
9495 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9500 for (i = 0; i < len; i += 4, offset += 4) {
9501 u32 data, page_off, phy_addr, nvram_cmd;
9503 memcpy(&data, buf + i, 4);
9504 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9506 page_off = offset % tp->nvram_pagesize;
9508 phy_addr = tg3_nvram_phys_addr(tp, offset);
9510 tw32(NVRAM_ADDR, phy_addr);
9512 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
/* First dword of a page (or of the whole transfer) opens a burst. */
9514 if ((page_off == 0) || (i == 0))
9515 nvram_cmd |= NVRAM_CMD_FIRST;
9516 if (page_off == (tp->nvram_pagesize - 4))
9517 nvram_cmd |= NVRAM_CMD_LAST;
9520 nvram_cmd |= NVRAM_CMD_LAST;
/* Pre-5752 ST flash needs an explicit write enable per page. */
9522 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9523 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9524 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9525 (tp->nvram_jedecnum == JEDEC_ST) &&
9526 (nvram_cmd & NVRAM_CMD_FIRST)) {
9528 if ((ret = tg3_nvram_exec_cmd(tp,
9529 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9534 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9535 /* We always do complete word writes to eeprom. */
9536 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9539 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9545 /* offset and length are dword aligned */
/*
 * tg3_nvram_write_block - top-level NVRAM write entry point.  Drops
 * the write-protect GPIO while writing, routes the transfer to the
 * eeprom / buffered / unbuffered implementation, and restores GPIO
 * state and write-enable bits afterwards.
 * NOTE(review): listing is elided — lock error handling, the else
 * branches and the final return are not visible here.
 */
9546 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* De-assert write-protect GPIO for the duration of the write. */
9550 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9552 ~GRC_LCLCTRL_GPIO_OUTPUT1);
9556 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9557 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9562 ret = tg3_nvram_lock(tp);
9566 tg3_enable_nvram_access(tp);
9567 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9568 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9569 tw32(NVRAM_WRITE1, 0x406);
/* Temporarily enable NVRAM writes in GRC_MODE. */
9571 grc_mode = tr32(GRC_MODE);
9572 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9574 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9575 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9577 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9581 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9585 grc_mode = tr32(GRC_MODE);
9586 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9588 tg3_disable_nvram_access(tp);
9589 tg3_nvram_unlock(tp);
/* Restore the write-protect GPIO. */
9592 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9593 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/*
 * Mapping from PCI subsystem (vendor, device) IDs to the PHY chip
 * known to be on that board; used when the EEPROM carries no PHY id.
 * NOTE(review): the struct's phy_id member line is elided from this
 * listing.
 */
9600 struct subsys_tbl_ent {
9601 u16 subsys_vendor, subsys_devid;
9605 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9606 /* Broadcom boards. */
9607 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9608 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9609 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9610 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9611 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9612 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9613 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9614 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9615 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9616 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9617 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
/* 3com boards. */
9620 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9621 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9622 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9623 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9624 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
/* Dell boards. */
9627 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9628 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9629 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9630 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9632 /* Compaq boards. */
9633 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9634 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9635 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9636 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9637 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
/* IBM boards. */
9640 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/*
 * lookup_by_subsys - linear search of subsys_id_to_phy_id[] for the
 * entry matching this device's PCI subsystem vendor/device IDs.
 * NOTE(review): the "not found" return (presumably NULL) is elided
 * from this listing.
 */
9643 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9647 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9648 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9649 tp->pdev->subsystem_vendor) &&
9650 (subsys_id_to_phy_id[i].subsys_devid ==
9651 tp->pdev->subsystem_device))
9652 return &subsys_id_to_phy_id[i];
/*
 * tg3_get_eeprom_hw_cfg - pull board configuration out of NIC SRAM
 * (written there by bootcode): PHY id, serdes vs copper, LED mode,
 * EEPROM write-protect, ASF enable, WOL capability and serdes
 * pre-emphasis.  Forces the device to D0 and enables the memory
 * arbiter first so SRAM is readable.
 * NOTE(review): listing is elided (switch scaffolding, else branches
 * and closing braces are missing); comments describe visible lines.
 */
9657 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9662 /* On some early chips the SRAM cannot be accessed in D3hot state,
9663 * so need make sure we're in D0.
9665 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9666 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9667 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9670 /* Make sure register accesses (indirect or otherwise)
9671 * will function correctly.
9673 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9674 tp->misc_host_ctrl);
9676 /* The memory arbiter has to be enabled in order for SRAM accesses
9677 * to succeed. Normally on powerup the tg3 chip firmware will make
9678 * sure it is enabled, but other entities such as system netboot
9679 * code might disable it.
9681 val = tr32(MEMARB_MODE);
9682 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
/* Defaults used when no valid SRAM signature is found. */
9684 tp->phy_id = PHY_ID_INVALID;
9685 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9687 /* Assume an onboard device by default. */
9688 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9690 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9691 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9692 u32 nic_cfg, led_cfg;
9693 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9694 int eeprom_phy_serdes = 0;
9696 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9697 tp->nic_sram_data_cfg = nic_cfg;
9699 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9700 ver >>= NIC_SRAM_DATA_VER_SHIFT;
/* CFG_2 only exists on newer ASICs with a sane bootcode version. */
9701 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9702 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9703 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9704 (ver > 0) && (ver < 0x100))
9705 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9707 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9708 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9709 eeprom_phy_serdes = 1;
9711 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9712 if (nic_phy_id != 0) {
9713 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9714 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
/* Recombine the SRAM id words into the driver's PHY id format. */
9716 eeprom_phy_id = (id1 >> 16) << 10;
9717 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9718 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9722 tp->phy_id = eeprom_phy_id;
9723 if (eeprom_phy_serdes) {
9724 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9725 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9727 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9730 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9731 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9732 SHASTA_EXT_LED_MODE_MASK);
9734 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9738 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9739 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9742 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9743 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9746 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9747 tp->led_ctrl = LED_CTRL_MODE_MAC;
9749 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9750 * read on some older 5700/5701 bootcode.
9752 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9754 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9756 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9760 case SHASTA_EXT_LED_SHARED:
9761 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9762 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9763 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9764 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9765 LED_CTRL_MODE_PHY_2);
9768 case SHASTA_EXT_LED_MAC:
9769 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9772 case SHASTA_EXT_LED_COMBO:
9773 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9774 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9775 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9776 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards always use PHY_2 LED mode. */
9781 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9783 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9784 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9786 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9787 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9789 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9791 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9792 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9793 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9794 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9796 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9797 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9799 if (cfg2 & (1 << 17))
9800 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9802 /* serdes signal pre-emphasis in register 0x590 set by */
9803 /* bootcode if bit 18 is set */
9804 if (cfg2 & (1 << 18))
9805 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
/*
 * tg3_phy_probe - determine which PHY is attached and program initial
 * advertisement.  PHY id is taken, in order of preference, from the
 * MII PHYSID registers, the EEPROM (tg3_get_eeprom_hw_cfg), or the
 * hardcoded subsystem-id table.  Copper PHYs without ASF get a reset
 * and a full 10/100(/1000) autoneg advertisement.
 * NOTE(review): listing is elided (braces, else branches and some
 * returns missing); comments describe visible lines only.
 */
9809 static int __devinit tg3_phy_probe(struct tg3 *tp)
9811 u32 hw_phy_id_1, hw_phy_id_2;
9812 u32 hw_phy_id, hw_phy_id_masked;
9815 /* Reading the PHY ID register can conflict with ASF
9816 * firmware access to the PHY hardware.
9819 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9820 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9822 /* Now read the physical PHY_ID from the chip and verify
9823 * that it is sane. If it doesn't look good, we fall back
9824 * to either the hard-coded table based PHY_ID and failing
9825 * that the value found in the eeprom area.
9827 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9828 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack PHYSID1/PHYSID2 into the driver's internal PHY id layout. */
9830 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9831 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9832 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9834 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9837 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9838 tp->phy_id = hw_phy_id;
9839 if (hw_phy_id_masked == PHY_ID_BCM8002)
9840 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9842 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9844 if (tp->phy_id != PHY_ID_INVALID) {
9845 /* Do nothing, phy ID already set up in
9846 * tg3_get_eeprom_hw_cfg().
9849 struct subsys_tbl_ent *p;
9851 /* No eeprom signature? Try the hardcoded
9852 * subsys device table.
9854 p = lookup_by_subsys(tp);
9858 tp->phy_id = p->phy_id;
9860 tp->phy_id == PHY_ID_BCM8002)
9861 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY with no ASF: reset and set up autonegotiation. */
9865 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9866 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9867 u32 bmsr, adv_reg, tg3_ctrl;
/* BMSR is latched; read twice so the second read is current. */
9869 tg3_readphy(tp, MII_BMSR, &bmsr);
9870 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9871 (bmsr & BMSR_LSTATUS))
9872 goto skip_phy_reset;
9874 err = tg3_phy_reset(tp);
9878 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9879 ADVERTISE_100HALF | ADVERTISE_100FULL |
9880 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9882 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9883 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9884 MII_TG3_CTRL_ADV_1000_FULL);
/* 5701 A0/B0 must be forced to master for 1000T. */
9885 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9886 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9887 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9888 MII_TG3_CTRL_ENABLE_AS_MASTER);
9891 if (!tg3_copper_is_advertising_all(tp)) {
9892 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9894 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9895 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9897 tg3_writephy(tp, MII_BMCR,
9898 BMCR_ANENABLE | BMCR_ANRESTART);
9900 tg3_phy_set_wirespeed(tp);
9902 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9903 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9904 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP reprogrammed after reset. */
9908 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9909 err = tg3_init_5401phy_dsp(tp);
9914 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9915 err = tg3_init_5401phy_dsp(tp);
9918 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9919 tp->link_config.advertising =
9920 (ADVERTISED_1000baseT_Half |
9921 ADVERTISED_1000baseT_Full |
9922 ADVERTISED_Autoneg |
9924 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9925 tp->link_config.advertising &=
9926 ~(ADVERTISED_1000baseT_Half |
9927 ADVERTISED_1000baseT_Full);
/*
 * tg3_read_partno - extract the board part number from the VPD area.
 * Reads 256 bytes of VPD either from NVRAM (when the EEPROM magic is
 * present) or through the PCI VPD capability, then walks the VPD
 * resource tags looking for the read-only 'PN' keyword and copies it
 * into tp->board_part_number ("none" on failure).
 * NOTE(review): listing is elided — tag-walk increments, the copy
 * length and the NUL termination are not visible here.
 */
9932 static void __devinit tg3_read_partno(struct tg3 *tp)
9934 unsigned char vpd_data[256];
9938 if (tg3_nvram_read_swab(tp, 0x0, &magic))
9941 if (magic == TG3_EEPROM_MAGIC) {
/* VPD lives at NVRAM offset 0x100; unpack dwords into bytes. */
9942 for (i = 0; i < 256; i += 4) {
9945 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9948 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9949 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9950 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9951 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
/* No NVRAM magic: read VPD via the PCI capability instead. */
9956 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9957 for (i = 0; i < 256; i += 4) {
9961 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9964 pci_read_config_word(tp->pdev, vpd_cap +
9965 PCI_VPD_ADDR, &tmp16);
/* Bit 15 of VPD_ADDR flips to 1 when the read data is ready. */
9970 if (!(tmp16 & 0x8000))
9973 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9975 tmp = cpu_to_le32(tmp);
9976 memcpy(&vpd_data[i], &tmp, 4);
9980 /* Now parse and find the part number. */
9981 for (i = 0; i < 256; ) {
9982 unsigned char val = vpd_data[i];
/* 0x82 = identifier string tag, 0x91 = read-only data tag. */
9985 if (val == 0x82 || val == 0x91) {
9988 (vpd_data[i + 2] << 8)));
9995 block_end = (i + 3 +
9997 (vpd_data[i + 2] << 8)));
9999 while (i < block_end) {
10000 if (vpd_data[i + 0] == 'P' &&
10001 vpd_data[i + 1] == 'N') {
10002 int partno_len = vpd_data[i + 2];
/* board_part_number buffer limits part numbers to 24 chars. */
10004 if (partno_len > 24)
10005 goto out_not_found;
10007 memcpy(tp->board_part_number,
10016 /* Part number not found. */
10017 goto out_not_found;
10021 strcpy(tp->board_part_number, "none");
/*
 * tg3_read_fw_ver - read the bootcode firmware version string out of
 * NVRAM into tp->fw_ver.  Validates the EEPROM magic, locates the
 * start/offset words, confirms the version-block signature
 * (top bits 0x0c000000), then copies up to 16 bytes.
 * NOTE(review): listing is elided — early returns and the ver_offset
 * sanity checks are not visible here.
 */
10024 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10026 u32 val, offset, start;
10028 if (tg3_nvram_read_swab(tp, 0, &val))
10031 if (val != TG3_EEPROM_MAGIC)
10034 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10035 tg3_nvram_read_swab(tp, 0x4, &start))
/* Offsets stored in NVRAM are AT45DB physical; map to logical. */
10038 offset = tg3_nvram_logical_addr(tp, offset);
10039 if (tg3_nvram_read_swab(tp, offset, &val))
10042 if ((val & 0xfc000000) == 0x0c000000) {
10043 u32 ver_offset, addr;
10046 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10047 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10053 addr = offset + ver_offset - start;
10054 for (i = 0; i < 16; i += 4) {
10055 if (tg3_nvram_read(tp, addr + i, &val))
10058 val = cpu_to_le32(val);
10059 memcpy(tp->fw_ver + i, &val, 4);
10064 static int __devinit tg3_get_invariants(struct tg3 *tp)
10066 static struct pci_device_id write_reorder_chipsets[] = {
10067 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10068 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10069 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10070 PCI_DEVICE_ID_VIA_8385_0) },
10074 u32 cacheline_sz_reg;
10075 u32 pci_state_reg, grc_misc_cfg;
10080 /* Force memory write invalidate off. If we leave it on,
10081 * then on 5700_BX chips we have to enable a workaround.
10082 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10083 * to match the cacheline size. The Broadcom driver have this
10084 * workaround but turns MWI off all the times so never uses
10085 * it. This seems to suggest that the workaround is insufficient.
10087 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10088 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10089 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10091 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10092 * has the register indirect write enable bit set before
10093 * we try to access any of the MMIO registers. It is also
10094 * critical that the PCI-X hw workaround situation is decided
10095 * before that as well.
10097 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10100 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10101 MISC_HOST_CTRL_CHIPREV_SHIFT);
10103 /* Wrong chip ID in 5752 A0. This code can be removed later
10104 * as A0 is not in production.
10106 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10107 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10109 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10110 * we need to disable memory and use config. cycles
10111 * only to access all registers. The 5702/03 chips
10112 * can mistakenly decode the special cycles from the
10113 * ICH chipsets as memory write cycles, causing corruption
10114 * of register and memory space. Only certain ICH bridges
10115 * will drive special cycles with non-zero data during the
10116 * address phase which can fall within the 5703's address
10117 * range. This is not an ICH bug as the PCI spec allows
10118 * non-zero address during special cycles. However, only
10119 * these ICH bridges are known to drive non-zero addresses
10120 * during special cycles.
10122 * Since special cycles do not cross PCI bridges, we only
10123 * enable this workaround if the 5703 is on the secondary
10124 * bus of these ICH bridges.
10126 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10127 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10128 static struct tg3_dev_id {
10132 } ich_chipsets[] = {
10133 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10135 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10137 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10139 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10143 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10144 struct pci_dev *bridge = NULL;
10146 while (pci_id->vendor != 0) {
10147 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10153 if (pci_id->rev != PCI_ANY_ID) {
10156 pci_read_config_byte(bridge, PCI_REVISION_ID,
10158 if (rev > pci_id->rev)
10161 if (bridge->subordinate &&
10162 (bridge->subordinate->number ==
10163 tp->pdev->bus->number)) {
10165 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10166 pci_dev_put(bridge);
10172 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10173 * DMA addresses > 40-bit. This bridge may have other additional
10174 * 57xx devices behind it in some 4-port NIC designs for example.
10175 * Any tg3 device found behind the bridge will also need the 40-bit
10178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10180 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10181 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10182 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10185 struct pci_dev *bridge = NULL;
10188 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10189 PCI_DEVICE_ID_SERVERWORKS_EPB,
10191 if (bridge && bridge->subordinate &&
10192 (bridge->subordinate->number <=
10193 tp->pdev->bus->number) &&
10194 (bridge->subordinate->subordinate >=
10195 tp->pdev->bus->number)) {
10196 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10197 pci_dev_put(bridge);
10203 /* Initialize misc host control in PCI block. */
10204 tp->misc_host_ctrl |= (misc_ctrl_reg &
10205 MISC_HOST_CTRL_CHIPREV);
10206 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10207 tp->misc_host_ctrl);
10209 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10210 &cacheline_sz_reg);
10212 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10213 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10214 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10215 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10221 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10222 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10224 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10225 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10226 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10228 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10230 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10231 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10232 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10234 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10235 TG3_FLG2_HW_TSO_1_BUG;
10236 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10238 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10239 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10243 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10244 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10245 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10246 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10247 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10248 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10250 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10251 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10253 /* If we have an AMD 762 or VIA K8T800 chipset, write
10254 * reordering to the mailbox registers done by the host
10255 * controller can cause major troubles. We read back from
10256 * every mailbox register write to force the writes to be
10257 * posted to the chip in order.
10259 if (pci_dev_present(write_reorder_chipsets) &&
10260 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10261 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10264 tp->pci_lat_timer < 64) {
10265 tp->pci_lat_timer = 64;
10267 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10268 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10269 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10270 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10272 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10276 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10279 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10280 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10282 /* If this is a 5700 BX chipset, and we are in PCI-X
10283 * mode, enable register write workaround.
10285 * The workaround is to use indirect register accesses
10286 * for all chip writes not to mailbox registers.
10288 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10292 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10294 /* The chip can have it's power management PCI config
10295 * space registers clobbered due to this bug.
10296 * So explicitly force the chip into D0 here.
10298 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10300 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10301 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10302 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10305 /* Also, force SERR#/PERR# in PCI command. */
10306 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10307 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10308 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10312 /* 5700 BX chips need to have their TX producer index mailboxes
10313 * written twice to workaround a bug.
10315 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10316 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10318 /* Back to back register writes can cause problems on this chip,
10319 * the workaround is to read back all reg writes except those to
10320 * mailbox regs. See tg3_write_indirect_reg32().
10322 * PCI Express 5750_A0 rev chips need this workaround too.
10324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10325 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10326 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10327 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10329 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10330 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10331 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10332 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10334 /* Chip-specific fixup from Broadcom driver */
10335 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10336 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10337 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10338 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10341 /* Default fast path register access methods */
10342 tp->read32 = tg3_read32;
10343 tp->write32 = tg3_write32;
10344 tp->read32_mbox = tg3_read32;
10345 tp->write32_mbox = tg3_write32;
10346 tp->write32_tx_mbox = tg3_write32;
10347 tp->write32_rx_mbox = tg3_write32;
10349 /* Various workaround register access methods */
10350 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10351 tp->write32 = tg3_write_indirect_reg32;
10352 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10353 tp->write32 = tg3_write_flush_reg32;
10355 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10356 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10357 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10358 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10359 tp->write32_rx_mbox = tg3_write_flush_reg32;
10362 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10363 tp->read32 = tg3_read_indirect_reg32;
10364 tp->write32 = tg3_write_indirect_reg32;
10365 tp->read32_mbox = tg3_read_indirect_mbox;
10366 tp->write32_mbox = tg3_write_indirect_mbox;
10367 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10368 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10373 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10374 pci_cmd &= ~PCI_COMMAND_MEMORY;
10375 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10378 if (tp->write32 == tg3_write_indirect_reg32 ||
10379 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10380 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10382 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10384 /* Get eeprom hw config before calling tg3_set_power_state().
10385 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10386 * determined before calling tg3_set_power_state() so that
10387 * we know whether or not to switch out of Vaux power.
10388 * When the flag is set, it means that GPIO1 is used for eeprom
10389 * write protect and also implies that it is a LOM where GPIOs
10390 * are not used to switch power.
10392 tg3_get_eeprom_hw_cfg(tp);
10394 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10395 * GPIO1 driven high will bring 5700's external PHY out of reset.
10396 * It is also used as eeprom write protect on LOMs.
10398 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10399 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10400 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10401 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10402 GRC_LCLCTRL_GPIO_OUTPUT1);
10403 /* Unused GPIO3 must be driven as output on 5752 because there
10404 * are no pull-up resistors on unused GPIO pins.
10406 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10407 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10410 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10412 /* Force the chip into D0. */
10413 err = tg3_set_power_state(tp, PCI_D0);
10415 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10416 pci_name(tp->pdev));
10420 /* 5700 B0 chips do not support checksumming correctly due
10421 * to hardware bugs.
10423 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10424 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10426 /* Derive initial jumbo mode from MTU assigned in
10427 * ether_setup() via the alloc_etherdev() call
10429 if (tp->dev->mtu > ETH_DATA_LEN &&
10430 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10431 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10433 /* Determine WakeOnLan speed to use. */
10434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10435 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10436 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10437 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10438 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10440 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10443 /* A few boards don't want Ethernet@WireSpeed phy feature */
10444 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10445 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10446 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10447 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10448 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10449 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10451 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10452 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10453 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10454 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10455 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10457 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10460 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10462 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10465 tp->coalesce_mode = 0;
10466 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10467 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10468 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10470 /* Initialize MAC MI mode, polling disabled. */
10471 tw32_f(MAC_MI_MODE, tp->mi_mode);
10474 /* Initialize data/descriptor byte/word swapping. */
10475 val = tr32(GRC_MODE);
10476 val &= GRC_MODE_HOST_STACKUP;
10477 tw32(GRC_MODE, val | tp->grc_mode);
10479 tg3_switch_clocks(tp);
10481 /* Clear this out for sanity. */
10482 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10484 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10486 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10487 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10488 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10490 if (chiprevid == CHIPREV_ID_5701_A0 ||
10491 chiprevid == CHIPREV_ID_5701_B0 ||
10492 chiprevid == CHIPREV_ID_5701_B2 ||
10493 chiprevid == CHIPREV_ID_5701_B5) {
10494 void __iomem *sram_base;
10496 /* Write some dummy words into the SRAM status block
10497 * area, see if it reads back correctly. If the return
10498 * value is bad, force enable the PCIX workaround.
10500 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10502 writel(0x00000000, sram_base);
10503 writel(0x00000000, sram_base + 4);
10504 writel(0xffffffff, sram_base + 4);
10505 if (readl(sram_base) != 0x00000000)
10506 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10511 tg3_nvram_init(tp);
10513 grc_misc_cfg = tr32(GRC_MISC_CFG);
10514 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10516 /* Broadcom's driver says that CIOBE multisplit has a bug */
10518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10519 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10520 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10521 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10525 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10526 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10527 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10529 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10530 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10531 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10532 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10533 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10534 HOSTCC_MODE_CLRTICK_TXBD);
10536 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10537 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10538 tp->misc_host_ctrl);
10541 /* these are limited to 10/100 only */
10542 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10543 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10544 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10545 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10546 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10547 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10548 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10549 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10550 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10551 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10552 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10554 err = tg3_phy_probe(tp);
10556 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10557 pci_name(tp->pdev), err);
10558 /* ... but do not return immediately ... */
10561 tg3_read_partno(tp);
10562 tg3_read_fw_ver(tp);
10564 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10565 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10568 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10570 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10573 /* 5700 {AX,BX} chips have a broken status block link
10574 * change bit implementation, so we must use the
10575 * status register in those cases.
10577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10578 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10580 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10582 /* The led_ctrl is set during tg3_phy_probe, here we might
10583 * have to force the link status polling mechanism based
10584 * upon subsystem IDs.
10586 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10587 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10588 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10589 TG3_FLAG_USE_LINKCHG_REG);
10592 /* For all SERDES we poll the MAC status register. */
10593 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10594 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10596 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10598 /* All chips before 5787 can get confused if TX buffers
10599 * straddle the 4GB address boundary in some cases.
10601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10603 tp->dev->hard_start_xmit = tg3_start_xmit;
10605 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10609 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10612 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10614 /* Increment the rx prod index on the rx std ring by at most
10615 * 8 for these chips to workaround hw errata.
10617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10620 tp->rx_std_max_post = 8;
10622 /* By default, disable wake-on-lan. User can change this
10623 * using ETHTOOL_SWOL.
10625 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10630 #ifdef CONFIG_SPARC64
/* Fetch the MAC address from the OpenFirmware device tree on sparc64.
 * Reads the "local-mac-address" property of this PCI device's PROM node
 * and, when a 6-byte address is found, copies it into both dev_addr and
 * perm_addr.
 * NOTE(review): this chunk is decimated -- the return statements and
 * closing braces of this function are not visible here.
 */
10631 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10633 struct net_device *dev = tp->dev;
10634 struct pci_dev *pdev = tp->pdev;
/* sysdata on sparc64 carries the PROM node cookie for this device. */
10635 struct pcidev_cookie *pcp = pdev->sysdata;
10638 unsigned char *addr;
10641 addr = of_get_property(pcp->prom_node, "local-mac-address",
/* Only accept a well-formed 6-byte Ethernet address property. */
10643 if (addr && len == 6) {
10644 memcpy(dev->dev_addr, addr, 6);
10645 memcpy(dev->perm_addr, dev->dev_addr, 6);
/* Last-resort MAC address source on sparc64: use the machine's IDPROM
 * Ethernet address (shared by all interfaces lacking their own address).
 */
10652 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10654 struct net_device *dev = tp->dev;
10656 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10657 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying sources in decreasing order
 * of preference:
 *   1. sparc64 OpenFirmware property (CONFIG_SPARC64 only),
 *   2. the bootcode's MAC address mailbox in NIC SRAM,
 *   3. NVRAM at mac_offset,
 *   4. the live MAC_ADDR_0_HIGH/LOW hardware registers,
 * falling back to the sparc IDPROM if nothing valid was found.
 * NOTE(review): decimated chunk -- mac_offset initialization, several
 * else/closing-brace lines and the return paths are not visible here.
 */
10662 static int __devinit tg3_get_device_address(struct tg3 *tp)
10664 struct net_device *dev = tp->dev;
10665 u32 hi, lo, mac_offset;
10668 #ifdef CONFIG_SPARC64
10669 if (!tg3_get_macaddr_sparc(tp))
/* 5704 and 5780-class parts are dual-MAC: the second function uses a
 * different SRAM offset, selected via TG3PCI_DUAL_MAC_CTRL.
 */
10674 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10675 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10676 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10678 if (tg3_nvram_lock(tp))
10679 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10681 tg3_nvram_unlock(tp);
10684 /* First try to get it from MAC address mailbox. */
10685 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b in the top half of the high word is the bootcode's signature
 * marking a valid mailbox entry.
 */
10686 if ((hi >> 16) == 0x484b) {
10687 dev->dev_addr[0] = (hi >> 8) & 0xff;
10688 dev->dev_addr[1] = (hi >> 0) & 0xff;
10690 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10691 dev->dev_addr[2] = (lo >> 24) & 0xff;
10692 dev->dev_addr[3] = (lo >> 16) & 0xff;
10693 dev->dev_addr[4] = (lo >> 8) & 0xff;
10694 dev->dev_addr[5] = (lo >> 0) & 0xff;
10696 /* Some old bootcode may report a 0 MAC address in SRAM */
10697 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10700 /* Next, try NVRAM. */
/* Note the NVRAM byte order differs from the mailbox layout above. */
10701 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10702 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10703 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10704 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10705 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10706 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10707 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10708 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10710 /* Finally just fetch it out of the MAC control regs. */
10712 hi = tr32(MAC_ADDR_0_HIGH);
10713 lo = tr32(MAC_ADDR_0_LOW);
10715 dev->dev_addr[5] = lo & 0xff;
10716 dev->dev_addr[4] = (lo >> 8) & 0xff;
10717 dev->dev_addr[3] = (lo >> 16) & 0xff;
10718 dev->dev_addr[2] = (lo >> 24) & 0xff;
10719 dev->dev_addr[1] = hi & 0xff;
10720 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* If still invalid, fall back to the machine-wide IDPROM address on
 * sparc64; other architectures presumably fail here -- not visible.
 */
10724 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10725 #ifdef CONFIG_SPARC64
10726 if (!tg3_get_default_macaddr_sparc(tp))
10731 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* Goal values for DMA burst boundary selection: disconnect DMA bursts at
 * every cacheline, or allow them to span multiple cachelines.
 */
10735 #define BOUNDARY_SINGLE_CACHELINE 1
10736 #define BOUNDARY_MULTI_CACHELINE 2

/* Compute the DMA read/write boundary bits to merge into DMA_RWCTRL
 * (passed in and returned via @val), based on the host cacheline size
 * reported in PCI config space and an architecture-specific goal.
 * Separate bit encodings exist for PCI-X, PCI Express and legacy PCI.
 * NOTE(review): decimated chunk -- the switch case labels, default
 * branches and returns are largely missing from view.
 */
10738 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10740 int cacheline_size;
10744 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero cacheline register presumably maps to the 1024 default; the
 * guarding condition is not visible here.
 */
10746 cacheline_size = 1024;
/* PCI_CACHE_LINE_SIZE is in 32-bit words; convert to bytes. */
10748 cacheline_size = (int) byte * 4;
10750 /* On 5703 and later chips, the boundary bits have no
10753 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10754 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10755 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Pick the per-architecture boundary goal. */
10758 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10759 goal = BOUNDARY_MULTI_CACHELINE;
10761 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10762 goal = BOUNDARY_SINGLE_CACHELINE;
10771 /* PCI controllers on most RISC systems tend to disconnect
10772 * when a device tries to burst across a cache-line boundary.
10773 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10775 * Unfortunately, for PCI-E there are only limited
10776 * write-side controls for this, and thus for reads
10777 * we will still get the disconnects. We'll also waste
10778 * these PCI cycles for both read and write for chips
10779 * other than 5700 and 5701 which do not implement the
/* PCI-X encoding of the boundary bits. */
10782 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10783 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10784 switch (cacheline_size) {
10789 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10790 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10791 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10793 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10794 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10799 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10800 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10804 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10805 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only write-side boundary control exists. */
10808 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10809 switch (cacheline_size) {
10813 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10814 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10815 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10821 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10822 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: both read and write boundaries selectable. */
10826 switch (cacheline_size) {
10828 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10829 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10830 DMA_RWCTRL_WRITE_BNDRY_16);
10835 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10836 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10837 DMA_RWCTRL_WRITE_BNDRY_32);
10842 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10843 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10844 DMA_RWCTRL_WRITE_BNDRY_64);
10849 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10850 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10851 DMA_RWCTRL_WRITE_BNDRY_128);
10856 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10857 DMA_RWCTRL_WRITE_BNDRY_256);
10860 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10861 DMA_RWCTRL_WRITE_BNDRY_512);
10865 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10866 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Perform one DMA transfer between a host buffer and NIC SRAM using the
 * chip's internal DMA descriptor mechanism, for DMA engine testing.
 *
 * @buf/@buf_dma: host test buffer and its DMA address
 * @size: transfer length in bytes
 * @to_device: nonzero = host->NIC (read DMA), zero = NIC->host (write DMA)
 *
 * Builds an internal buffer descriptor, writes it into the SRAM DMA
 * descriptor pool via the PCI memory window, enqueues it on the
 * appropriate FTQ and polls the completion FIFO.
 * NOTE(review): decimated chunk -- the to_device branch structure, poll
 * delay and return statements are not fully visible.
 */
10875 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10877 struct tg3_internal_buffer_desc test_desc;
10878 u32 sram_dma_descs;
10881 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce completion FIFOs and both DMA engines before the test. */
10883 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10884 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10885 tw32(RDMAC_STATUS, 0);
10886 tw32(WDMAC_STATUS, 0);
10888 tw32(BUFMGR_MODE, 0);
10889 tw32(FTQ_RESET, 0);
/* Descriptor points at the host buffer; 0x2100 is the NIC SRAM mbuf
 * area used as the on-chip endpoint of the transfer.
 */
10891 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10892 test_desc.addr_lo = buf_dma & 0xffffffff;
10893 test_desc.nic_mbuf = 0x00002100;
10894 test_desc.len = size;
10897 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10898 * the *second* time the tg3 driver was getting loaded after an
10901 * Broadcom tells me:
10902 * ...the DMA engine is connected to the GRC block and a DMA
10903 * reset may affect the GRC block in some unpredictable way...
10904 * The behavior of resets to individual blocks has not been tested.
10906 * Broadcom noted the GRC reset will also reset all sub-components.
/* Completion queue / source queue ids differ per direction. */
10909 test_desc.cqid_sqid = (13 << 8) | 2;
10911 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10914 test_desc.cqid_sqid = (16 << 8) | 7;
10916 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10919 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into SRAM through the PCI config
 * space memory window (indirect access).
 */
10921 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10924 val = *(((u32 *)&test_desc) + i);
10925 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10926 sram_dma_descs + (i * sizeof(u32)));
10927 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10929 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the transfer by enqueueing the descriptor on the proper FTQ. */
10932 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10934 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll up to 40 iterations for the descriptor to appear on the
 * completion FIFO.
 */
10938 for (i = 0; i < 40; i++) {
10942 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10944 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10945 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the host buffer used for the DMA round-trip test (8 KiB). */
10956 #define TEST_BUFFER_SIZE 0x2000

/* Probe safe DMA_RWCTRL settings for this chip/bus combination by
 * round-tripping a test pattern through the NIC's DMA engines.
 * Computes chip- and bus-specific watermark bits, then (on 5700/5701
 * only) runs a write/read DMA test at maximum burst size to detect the
 * 5700/5701 write-DMA bug, tightening the write boundary to 16 bytes
 * if corruption is seen or a known-problem host bridge is present.
 * NOTE(review): decimated chunk -- loop bodies, some else branches and
 * the final return are not fully visible.
 */
10958 static int __devinit tg3_test_dma(struct tg3 *tp)
10960 dma_addr_t buf_dma;
10961 u32 *buf, saved_dma_rwctrl;
10964 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Base PCI read/write command codes for DMA_RWCTRL. */
10970 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10971 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10973 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Chip/bus specific DMA watermark and workaround bits. */
10975 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10976 /* DMA read watermark not used on PCIE */
10977 tp->dma_rwctrl |= 0x00180000;
10978 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10981 tp->dma_rwctrl |= 0x003f0000;
10983 tp->dma_rwctrl |= 0x003f000f;
10985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10987 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10989 /* If the 5704 is behind the EPB bridge, we can
10990 * do the less restrictive ONE_DMA workaround for
10991 * better performance.
10993 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10995 tp->dma_rwctrl |= 0x8000;
10996 else if (ccval == 0x6 || ccval == 0x7)
10997 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10999 /* Set bit 23 to enable PCIX hw bug fix */
11000 tp->dma_rwctrl |= 0x009f0000;
11001 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11002 /* 5780 always in PCIX mode */
11003 tp->dma_rwctrl |= 0x00144000;
11004 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11005 /* 5714 always in PCIX mode */
11006 tp->dma_rwctrl |= 0x00148000;
11008 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: low nibble bits are reassigned; clear them. */
11012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11014 tp->dma_rwctrl &= 0xfffffff0;
11016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11018 /* Remove this if it causes problems for some boards. */
11019 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11021 /* On 5700/5701 chips, we need to set this bit.
11022 * Otherwise the chip will issue cacheline transactions
11023 * to streamable DMA memory with not all the byte
11024 * enables turned on. This is an error on several
11025 * RISC PCI controllers, in particular sparc64.
11027 * On 5703/5704 chips, this bit has been reassigned
11028 * a different meaning. In particular, it is used
11029 * on those chips to enable a PCI-X workaround.
11031 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11034 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11037 /* Unneeded, already done by tg3_get_invariants. */
11038 tg3_switch_clocks(tp);
/* Only 5700/5701 exhibit the write-DMA bug; skip the test otherwise. */
11042 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11043 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11046 /* It is best to perform DMA test with maximum write burst size
11047 * to expose the 5700/5701 write DMA bug.
11049 saved_dma_rwctrl = tp->dma_rwctrl;
11050 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11051 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the host buffer with a known pattern (loop body not visible). */
11056 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11059 /* Send the buffer to the chip. */
11060 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11062 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11067 /* validate data reached card RAM correctly. */
11068 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11070 tg3_read_mem(tp, 0x2100 + (i*4), &val);
11071 if (le32_to_cpu(val) != p[i]) {
11072 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
11073 /* ret = -ENODEV here? */
11078 /* Now read it back. */
11079 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11081 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
/* Verify the round-tripped data; on mismatch, tighten the write
 * boundary to 16 bytes and presumably retry (retry path not visible).
 */
11087 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11091 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11092 DMA_RWCTRL_WRITE_BNDRY_16) {
11093 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11094 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11095 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11098 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Whole buffer verified clean. */
11104 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11110 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11111 DMA_RWCTRL_WRITE_BNDRY_16) {
/* Host bridges known to expose the DMA bug even when the test passes. */
11112 static struct pci_device_id dma_wait_state_chipsets[] = {
11113 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11114 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11118 /* DMA test passed without adjusting DMA boundary,
11119 * now look for chipsets that are known to expose the
11120 * DMA bug without failing the test.
11122 if (pci_dev_present(dma_wait_state_chipsets)) {
11123 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11124 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11127 /* Safe to use the calculated DMA boundary. */
11128 tp->dma_rwctrl = saved_dma_rwctrl;
11130 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11134 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Initialize tp->link_config to driver defaults: advertise all
 * 10/100/1000 modes with autonegotiation enabled, and mark the current,
 * active and original speed/duplex settings as invalid (not yet known).
 */
11139 static void __devinit tg3_init_link_config(struct tg3 *tp)
11141 tp->link_config.advertising =
11142 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11143 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11144 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11145 ADVERTISED_Autoneg | ADVERTISED_MII);
11146 tp->link_config.speed = SPEED_INVALID;
11147 tp->link_config.duplex = DUPLEX_INVALID;
11148 tp->link_config.autoneg = AUTONEG_ENABLE;
11149 tp->link_config.active_speed = SPEED_INVALID;
11150 tp->link_config.active_duplex = DUPLEX_INVALID;
11151 tp->link_config.phy_is_low_power = 0;
/* orig_* hold the pre-power-down settings to restore on resume. */
11152 tp->link_config.orig_speed = SPEED_INVALID;
11153 tp->link_config.orig_duplex = DUPLEX_INVALID;
11154 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Set default buffer-manager watermarks (mbuf and DMA descriptor low/high
 * water marks) for standard and jumbo frames.  5705-and-later chips use a
 * smaller on-chip buffer pool and therefore different default values.
 */
11157 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11159 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11160 tp->bufmgr_config.mbuf_read_dma_low_water =
11161 DEFAULT_MB_RDMA_LOW_WATER_5705;
11162 tp->bufmgr_config.mbuf_mac_rx_low_water =
11163 DEFAULT_MB_MACRX_LOW_WATER_5705;
11164 tp->bufmgr_config.mbuf_high_water =
11165 DEFAULT_MB_HIGH_WATER_5705;
/* Jumbo-frame watermarks for the 5780-class members of this group. */
11167 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11168 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11169 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11170 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11171 tp->bufmgr_config.mbuf_high_water_jumbo =
11172 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11174 tp->bufmgr_config.mbuf_read_dma_low_water =
11175 DEFAULT_MB_RDMA_LOW_WATER;
11176 tp->bufmgr_config.mbuf_mac_rx_low_water =
11177 DEFAULT_MB_MACRX_LOW_WATER;
11178 tp->bufmgr_config.mbuf_high_water =
11179 DEFAULT_MB_HIGH_WATER;
11181 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11182 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11183 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11184 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11185 tp->bufmgr_config.mbuf_high_water_jumbo =
11186 DEFAULT_MB_HIGH_WATER_JUMBO;
/* DMA descriptor watermarks are common to all chip generations. */
11189 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11190 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id to a human-readable model name for the probe
 * banner.  A PHY id of 0 indicates a serdes-only configuration.
 */
11193 static char * __devinit tg3_phy_string(struct tg3 *tp)
11195 switch (tp->phy_id & PHY_ID_MASK) {
11196 case PHY_ID_BCM5400: return "5400";
11197 case PHY_ID_BCM5401: return "5401";
11198 case PHY_ID_BCM5411: return "5411";
11199 case PHY_ID_BCM5701: return "5701";
11200 case PHY_ID_BCM5703: return "5703";
11201 case PHY_ID_BCM5704: return "5704";
11202 case PHY_ID_BCM5705: return "5705";
11203 case PHY_ID_BCM5750: return "5750";
11204 case PHY_ID_BCM5752: return "5752";
11205 case PHY_ID_BCM5714: return "5714";
11206 case PHY_ID_BCM5780: return "5780";
11207 case PHY_ID_BCM5755: return "5755";
11208 case PHY_ID_BCM5787: return "5787";
11209 case PHY_ID_BCM8002: return "8002/serdes";
11210 case 0: return "serdes";
11211 default: return "unknown";
/* Format a description of the bus the NIC sits on ("PCI Express",
 * "PCIX:133MHz", "PCI:66MHz:64-bit", ...) into the caller-supplied
 * buffer @str.  For PCI-X, the speed is decoded from the low 5 bits of
 * TG3PCI_CLOCK_CTRL; for plain PCI, from the PCISTATE-derived flags.
 */
11215 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11217 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11218 strcpy(str, "PCI Express");
11220 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11221 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11223 strcpy(str, "PCIX:");
/* The 5704 CIOBE board reports 133MHz regardless of clock_ctrl. */
11225 if ((clock_ctrl == 7) ||
11226 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11227 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11228 strcat(str, "133MHz");
11229 else if (clock_ctrl == 0)
11230 strcat(str, "33MHz");
11231 else if (clock_ctrl == 2)
11232 strcat(str, "50MHz");
11233 else if (clock_ctrl == 4)
11234 strcat(str, "66MHz");
11235 else if (clock_ctrl == 6)
11236 strcat(str, "100MHz");
11238 strcpy(str, "PCI:");
11239 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11240 strcat(str, "66MHz");
11242 strcat(str, "33MHz");
/* Append bus width for the conventional-PCI case. */
11244 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11245 strcat(str, ":32-bit");
11247 strcat(str, ":64-bit");
/* Locate the peer PCI function of a dual-port device (e.g. the other
 * port of a 5704) by scanning the other functions of the same slot.
 * Falls back to tp->pdev itself for single-port configurations.
 */
11251 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11253 struct pci_dev *peer;
/* Mask off the function number to get the slot's base devfn. */
11254 unsigned int func, devnr = tp->pdev->devfn & ~7;
11256 for (func = 0; func < 8; func++) {
11257 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11258 if (peer && peer != tp->pdev)
11262 /* 5704 can be configured in single-port mode, set peer to
11263 * tp->pdev in that case.
11271 * We don't need to keep the refcount elevated; there's no way
11272 * to remove one half of this device without removing the other
/* Initialize the default interrupt-coalescing parameters exposed via
 * ethtool (tp->coal).  The CLRTICK coalesce modes and 5705+ chips get
 * adjusted defaults because those chips handle the IRQ-time tick and
 * statistics coalescing differently.
 */
11279 static void __devinit tg3_init_coal(struct tg3 *tp)
11281 struct ethtool_coalesce *ec = &tp->coal;
11283 memset(ec, 0, sizeof(*ec));
11284 ec->cmd = ETHTOOL_GCOALESCE;
11285 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11286 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11287 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11288 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11289 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11290 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11291 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11292 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11293 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* Chips running with CLRTICK BD modes need the CLRTCKS variants. */
11295 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11296 HOSTCC_MODE_CLRTICK_TXBD)) {
11297 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11298 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11299 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11300 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ chips do not support the IRQ-time / stats coalescing knobs. */
11303 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11304 ec->rx_coalesce_usecs_irq = 0;
11305 ec->tx_coalesce_usecs_irq = 0;
11306 ec->stats_block_coalesce_usecs = 0;
11310 static int __devinit tg3_init_one(struct pci_dev *pdev,
11311 const struct pci_device_id *ent)
11313 static int tg3_version_printed = 0;
11314 unsigned long tg3reg_base, tg3reg_len;
11315 struct net_device *dev;
11317 int i, err, pm_cap;
11319 u64 dma_mask, persist_dma_mask;
11321 if (tg3_version_printed++ == 0)
11322 printk(KERN_INFO "%s", version);
11324 err = pci_enable_device(pdev);
11326 printk(KERN_ERR PFX "Cannot enable PCI device, "
11331 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11332 printk(KERN_ERR PFX "Cannot find proper PCI device "
11333 "base address, aborting.\n");
11335 goto err_out_disable_pdev;
11338 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11340 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11342 goto err_out_disable_pdev;
11345 pci_set_master(pdev);
11347 /* Find power-management capability. */
11348 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11350 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11353 goto err_out_free_res;
11356 tg3reg_base = pci_resource_start(pdev, 0);
11357 tg3reg_len = pci_resource_len(pdev, 0);
11359 dev = alloc_etherdev(sizeof(*tp));
11361 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11363 goto err_out_free_res;
11366 SET_MODULE_OWNER(dev);
11367 SET_NETDEV_DEV(dev, &pdev->dev);
11369 #if TG3_VLAN_TAG_USED
11370 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11371 dev->vlan_rx_register = tg3_vlan_rx_register;
11372 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11375 tp = netdev_priv(dev);
11378 tp->pm_cap = pm_cap;
11379 tp->mac_mode = TG3_DEF_MAC_MODE;
11380 tp->rx_mode = TG3_DEF_RX_MODE;
11381 tp->tx_mode = TG3_DEF_TX_MODE;
11382 tp->mi_mode = MAC_MI_MODE_BASE;
11384 tp->msg_enable = tg3_debug;
11386 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11388 /* The word/byte swap controls here control register access byte
11389 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11392 tp->misc_host_ctrl =
11393 MISC_HOST_CTRL_MASK_PCI_INT |
11394 MISC_HOST_CTRL_WORD_SWAP |
11395 MISC_HOST_CTRL_INDIR_ACCESS |
11396 MISC_HOST_CTRL_PCISTATE_RW;
11398 /* The NONFRM (non-frame) byte/word swap controls take effect
11399 * on descriptor entries, anything which isn't packet data.
11401 * The StrongARM chips on the board (one for tx, one for rx)
11402 * are running in big-endian mode.
11404 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11405 GRC_MODE_WSWAP_NONFRM_DATA);
11406 #ifdef __BIG_ENDIAN
11407 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11409 spin_lock_init(&tp->lock);
11410 spin_lock_init(&tp->tx_lock);
11411 spin_lock_init(&tp->indirect_lock);
11412 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11414 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11415 if (tp->regs == 0UL) {
11416 printk(KERN_ERR PFX "Cannot map device registers, "
11419 goto err_out_free_dev;
11422 tg3_init_link_config(tp);
11424 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11425 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11426 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11428 dev->open = tg3_open;
11429 dev->stop = tg3_close;
11430 dev->get_stats = tg3_get_stats;
11431 dev->set_multicast_list = tg3_set_rx_mode;
11432 dev->set_mac_address = tg3_set_mac_addr;
11433 dev->do_ioctl = tg3_ioctl;
11434 dev->tx_timeout = tg3_tx_timeout;
11435 dev->poll = tg3_poll;
11436 dev->ethtool_ops = &tg3_ethtool_ops;
11438 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11439 dev->change_mtu = tg3_change_mtu;
11440 dev->irq = pdev->irq;
11441 #ifdef CONFIG_NET_POLL_CONTROLLER
11442 dev->poll_controller = tg3_poll_controller;
11445 err = tg3_get_invariants(tp);
11447 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11449 goto err_out_iounmap;
11452 /* The EPB bridge inside 5714, 5715, and 5780 and any
11453 * device behind the EPB cannot support DMA addresses > 40-bit.
11454 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11455 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11456 * do DMA address check in tg3_start_xmit().
11458 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11459 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11460 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11461 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11462 #ifdef CONFIG_HIGHMEM
11463 dma_mask = DMA_64BIT_MASK;
11466 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11468 /* Configure DMA attributes. */
11469 if (dma_mask > DMA_32BIT_MASK) {
11470 err = pci_set_dma_mask(pdev, dma_mask);
11472 dev->features |= NETIF_F_HIGHDMA;
11473 err = pci_set_consistent_dma_mask(pdev,
11476 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11477 "DMA for consistent allocations\n");
11478 goto err_out_iounmap;
11482 if (err || dma_mask == DMA_32BIT_MASK) {
11483 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11485 printk(KERN_ERR PFX "No usable DMA configuration, "
11487 goto err_out_iounmap;
11491 tg3_init_bufmgr_config(tp);
11493 #if TG3_TSO_SUPPORT != 0
11494 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11495 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11497 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11499 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11500 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11501 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11503 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11506 /* TSO is on by default on chips that support hardware TSO.
11507 * Firmware TSO on older chips gives lower performance, so it
11508 * is off by default, but can be enabled using ethtool.
11510 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11511 dev->features |= NETIF_F_TSO;
11515 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11516 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11517 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11518 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11519 tp->rx_pending = 63;
11522 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11523 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11524 tp->pdev_peer = tg3_find_peer(tp);
11526 err = tg3_get_device_address(tp);
11528 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11530 goto err_out_iounmap;
11534 * Reset chip in case UNDI or EFI driver did not shutdown
11535 * DMA self test will enable WDMAC and we'll see (spurious)
11536 * pending DMA on the PCI bus at that point.
11538 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11539 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11540 pci_save_state(tp->pdev);
11541 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11542 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11545 err = tg3_test_dma(tp);
11547 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11548 goto err_out_iounmap;
11551 /* Tigon3 can do ipv4 only... and some chips have buggy
11554 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11557 dev->features |= NETIF_F_HW_CSUM;
11559 dev->features |= NETIF_F_IP_CSUM;
11560 dev->features |= NETIF_F_SG;
11561 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11563 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11565 /* flow control autonegotiation is default behavior */
11566 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11570 /* Now that we have fully setup the chip, save away a snapshot
11571 * of the PCI config space. We need to restore this after
11572 * GRC_MISC_CFG core clock resets and some resume events.
11574 pci_save_state(tp->pdev);
11576 err = register_netdev(dev);
11578 printk(KERN_ERR PFX "Cannot register net device, "
11580 goto err_out_iounmap;
11583 pci_set_drvdata(pdev, dev);
11585 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11587 tp->board_part_number,
11588 tp->pci_chip_rev_id,
11589 tg3_phy_string(tp),
11590 tg3_bus_string(tp, str),
11591 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11593 for (i = 0; i < 6; i++)
11594 printk("%2.2x%c", dev->dev_addr[i],
11595 i == 5 ? '\n' : ':');
11597 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11598 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11601 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11602 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11603 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11604 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11605 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11606 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11607 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11608 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11609 dev->name, tp->dma_rwctrl,
11610 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11611 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11613 netif_carrier_off(tp->dev);
11627 pci_release_regions(pdev);
11629 err_out_disable_pdev:
11630 pci_disable_device(pdev);
11631 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one() - teardown on driver unbind / module unload.
 *
 * Reverses the probe path: stop deferred work, unregister the net
 * device, then release the PCI resources that probe acquired.
 *
 * NOTE(review): this extraction is elided — the full driver guards the
 * body with "if (dev)" and also iounmaps tp->regs and frees the
 * net_device before releasing the PCI regions; those lines are not
 * visible here. Confirm against the complete source before editing.
 */
11635 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11637 struct net_device *dev = pci_get_drvdata(pdev);
11640 struct tg3 *tp = netdev_priv(dev);
/* Make sure the reset_task (scheduled from the interrupt path) has
 * finished before the device goes away underneath it.
 */
11642 flush_scheduled_work();
11643 unregister_netdev(dev);
/* Give back BARs and disable the function, then clear the drvdata
 * pointer so later callbacks see no stale device.
 */
11649 pci_release_regions(pdev);
11650 pci_disable_device(pdev);
11651 pci_set_drvdata(pdev, NULL);
/* tg3_suspend() - PCI PM suspend hook.
 *
 * Quiesces the interface (queues, poll loop, timer, interrupts), halts
 * the chip, and drops it to the power state chosen for @state. If the
 * power transition fails, the chip is re-initialized and the interface
 * brought back up so the system is left in a working state.
 *
 * NOTE(review): extraction is elided — the "return 0;" for the
 * !netif_running case, the "if (err) {" opening the recovery branch
 * after tg3_set_power_state(), and the final "return err;" are not
 * visible here.
 */
11655 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11657 struct net_device *dev = pci_get_drvdata(pdev);
11658 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface is down. */
11661 if (!netif_running(dev))
/* Let any pending reset_task finish, then stop TX/NAPI activity. */
11664 flush_scheduled_work();
11665 tg3_netif_stop(tp);
11667 del_timer_sync(&tp->timer);
/* irq_sync=1: also synchronize with the interrupt handler before
 * masking chip interrupts.
 */
11669 tg3_full_lock(tp, 1);
11670 tg3_disable_ints(tp);
11671 tg3_full_unlock(tp);
/* Mark the device unreachable for the network stack while halted. */
11673 netif_device_detach(dev);
11675 tg3_full_lock(tp, 0);
11676 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11677 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11678 tg3_full_unlock(tp);
/* Enter the low-power state the PCI core picked for this system sleep
 * state.
 */
11680 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Recovery path (runs when the power transition failed — the guarding
 * "if (err)" is elided from this view): re-init the hardware and
 * restart everything that was stopped above.
 */
11682 tg3_full_lock(tp, 0);
11684 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11685 tg3_init_hw(tp, 1);
11687 tp->timer.expires = jiffies + tp->timer_offset;
11688 add_timer(&tp->timer);
11690 netif_device_attach(dev);
11691 tg3_netif_start(tp);
11693 tg3_full_unlock(tp);
/* tg3_resume() - PCI PM resume hook; inverse of tg3_suspend().
 *
 * Restores PCI config space, powers the chip back to D0, re-initializes
 * the hardware, and restarts the timer and the network interface.
 *
 * NOTE(review): extraction is elided — the "return 0;" for the
 * !netif_running case, the error check after tg3_set_power_state(),
 * and the final return are not visible here.
 */
11699 static int tg3_resume(struct pci_dev *pdev)
11701 struct net_device *dev = pci_get_drvdata(pdev);
11702 struct tg3 *tp = netdev_priv(dev);
/* Interface was down at suspend time: nothing to restart. */
11705 if (!netif_running(dev))
/* Config space was saved earlier (probe/suspend paths call
 * pci_save_state()); put it back before touching the chip.
 */
11708 pci_restore_state(tp->pdev);
11710 err = tg3_set_power_state(tp, PCI_D0);
/* Make the device visible to the stack again, then bring the
 * hardware back up under the full lock.
 */
11714 netif_device_attach(dev);
11716 tg3_full_lock(tp, 0);
11718 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11719 tg3_init_hw(tp, 1);
11721 tp->timer.expires = jiffies + tp->timer_offset;
11722 add_timer(&tp->timer);
11724 tg3_netif_start(tp);
11726 tg3_full_unlock(tp);
/* PCI driver glue: binds tg3_init_one() to every device listed in
 * tg3_pci_tbl and wires in the remove/suspend/resume callbacks above.
 * (The closing "};" of this initializer is elided from this view.)
 */
11731 static struct pci_driver tg3_driver = {
11732 .name = DRV_MODULE_NAME,
11733 .id_table = tg3_pci_tbl,
11734 .probe = tg3_init_one,
/* __devexit_p() compiles to NULL when hotplug removal support is
 * configured out, so the section-discarded function is never referenced.
 */
11735 .remove = __devexit_p(tg3_remove_one),
11736 .suspend = tg3_suspend,
11737 .resume = tg3_resume
11740 static int __init tg3_init(void)
11742 return pci_module_init(&tg3_driver);
11745 static void __exit tg3_cleanup(void)
11747 pci_unregister_driver(&tg3_driver);
/* Hook the entry/exit functions into the module loader (also serve as
 * the init/exit calls when the driver is built into the kernel).
 */
11750 module_init(tg3_init);
11751 module_exit(tg3_cleanup);