2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
18 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
43 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
/*
 * Compile-time configuration flags and driver-wide constants.
 *
 * NOTE(review): this copy of the file is an extraction artifact -- each line
 * carries its original line number as embedded text, and many lines (blank
 * lines, #else/#endif guards, macro continuation lines) were dropped.  Code
 * below is kept byte-identical; only comments were added.
 */
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
/* NOTE(review): the #else/#endif lines of this conditional were lost in
 * extraction; the 0-definition below is the #else arm. */
59 #define TG3_VLAN_TAG_USED 0
/* TSO support flag -- guard lines of its conditional were likewise lost. */
63 #define TG3_TSO_SUPPORT 1
65 #define TG3_TSO_SUPPORT 0
/* Driver identification strings. */
70 #define DRV_MODULE_NAME "tg3"
71 #define PFX DRV_MODULE_NAME ": "
72 #define DRV_MODULE_VERSION "3.60"
73 #define DRV_MODULE_RELDATE "June 17, 2006"
/* Default MAC/RX/TX mode register values (0 = hardware reset defaults). */
75 #define TG3_DEF_MAC_MODE 0
76 #define TG3_DEF_RX_MODE 0
77 #define TG3_DEF_TX_MODE 0
/* NOTE(review): the continuation lines of this msg-enable mask were lost. */
78 #define TG3_DEF_MSG_ENABLE \
88 /* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
91 #define TG3_TX_TIMEOUT (5 * HZ)
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU 60
95 #define TG3_MAX_MTU(tp) \
96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
102 #define TG3_RX_RING_SIZE 512
103 #define TG3_DEF_RX_RING_PENDING 200
104 #define TG3_RX_JUMBO_RING_SIZE 256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
116 #define TG3_TX_RING_SIZE 512
117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
/* Byte sizes of the descriptor rings.  NOTE(review): the continuation
 * lines naming TG3_RX_RING_SIZE / TG3_TX_RING_SIZE for the first and
 * last macros were lost in extraction. */
119 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
/* Free TX slots: pending budget minus in-flight (prod - cons), masked
 * because the ring size is a power of two. */
127 #define TX_BUFFS_AVAIL(TP) \
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
/* RX buffer sizes: max frame + alignment offset + 64 bytes of slack. */
132 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
141 #define TG3_NUM_TEST 6
/* Module identification and the "tg3_debug" parameter.
 * tg3_debug = -1 means "use TG3_DEF_MSG_ENABLE" (resolved at probe time). */
143 static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
151 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* PCI device ID table: every Broadcom / SysKonnect / Altima / Apple Tigon3
 * variant this driver binds to.  All entries wildcard the subsystem IDs
 * (PCI_ANY_ID, PCI_ANY_ID) and carry no driver_data.
 * NOTE(review): the terminating "{ 0, }" sentinel entry and the closing
 * "};" were dropped by the extraction; table body kept byte-identical. */
155 static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
269 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names reported for ETHTOOL_GSTATS, in the same order as the u64 fields
 * of struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 * NOTE(review): the "static const struct {" opener, several entries, and
 * the closing "};" were dropped by the extraction; kept byte-identical. */
272 const char string[ETH_GSTRING_LEN];
273 } ethtool_stats_keys[TG3_NUM_STATS] = {
276 { "rx_ucast_packets" },
277 { "rx_mcast_packets" },
278 { "rx_bcast_packets" },
280 { "rx_align_errors" },
281 { "rx_xon_pause_rcvd" },
282 { "rx_xoff_pause_rcvd" },
283 { "rx_mac_ctrl_rcvd" },
284 { "rx_xoff_entered" },
285 { "rx_frame_too_long_errors" },
287 { "rx_undersize_packets" },
288 { "rx_in_length_errors" },
289 { "rx_out_length_errors" },
290 { "rx_64_or_less_octet_packets" },
291 { "rx_65_to_127_octet_packets" },
292 { "rx_128_to_255_octet_packets" },
293 { "rx_256_to_511_octet_packets" },
294 { "rx_512_to_1023_octet_packets" },
295 { "rx_1024_to_1522_octet_packets" },
296 { "rx_1523_to_2047_octet_packets" },
297 { "rx_2048_to_4095_octet_packets" },
298 { "rx_4096_to_8191_octet_packets" },
299 { "rx_8192_to_9022_octet_packets" },
306 { "tx_flow_control" },
308 { "tx_single_collisions" },
309 { "tx_mult_collisions" },
311 { "tx_excessive_collisions" },
312 { "tx_late_collisions" },
313 { "tx_collide_2times" },
314 { "tx_collide_3times" },
315 { "tx_collide_4times" },
316 { "tx_collide_5times" },
317 { "tx_collide_6times" },
318 { "tx_collide_7times" },
319 { "tx_collide_8times" },
320 { "tx_collide_9times" },
321 { "tx_collide_10times" },
322 { "tx_collide_11times" },
323 { "tx_collide_12times" },
324 { "tx_collide_13times" },
325 { "tx_collide_14times" },
326 { "tx_collide_15times" },
327 { "tx_ucast_packets" },
328 { "tx_mcast_packets" },
329 { "tx_bcast_packets" },
330 { "tx_carrier_sense_errors" },
334 { "dma_writeq_full" },
335 { "dma_write_prioq_full" },
339 { "rx_threshold_hit" },
341 { "dma_readq_full" },
342 { "dma_read_prioq_full" },
343 { "tx_comp_queue_full" },
345 { "ring_set_send_prod_index" },
346 { "ring_status_update" },
348 { "nic_avoided_irqs" },
349 { "nic_tx_threshold_hit" }
/* Names of the TG3_NUM_TEST self-tests reported via ethtool; "online"
 * tests run without taking the link down, "offline" tests do not.
 * NOTE(review): the struct opener and closing "};" were dropped. */
353 const char string[ETH_GSTRING_LEN];
354 } ethtool_test_keys[TG3_NUM_TEST] = {
355 { "nvram test (online) " },
356 { "link test (online) " },
357 { "register test (offline)" },
358 { "memory test (offline)" },
359 { "loopback test (offline)" },
360 { "interrupt test (offline)" },
/* Low-level register accessors.
 * Direct MMIO variants (tg3_write32 / tg3_read32 / tg3_write_flush_reg32)
 * hit tp->regs; the "indirect" variants go through the PCI config-space
 * window (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA) under tp->indirect_lock.
 * NOTE(review): braces, local declarations and return statements were
 * dropped by the extraction; code kept byte-identical. */
363 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
365 writel(val, tp->regs + off);
368 static u32 tg3_read32(struct tg3 *tp, u32 off)
370 return (readl(tp->regs + off));
373 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
377 spin_lock_irqsave(&tp->indirect_lock, flags);
378 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
379 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
380 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Write followed by a read-back: flushes posted PCI writes to the chip. */
383 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
385 writel(val, tp->regs + off);
386 readl(tp->regs + off);
389 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
394 spin_lock_irqsave(&tp->indirect_lock, flags);
395 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
396 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
397 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Indirect mailbox accessors (used when MMIO mailbox access is unsafe).
 * Two mailboxes have dedicated config-space aliases and are written
 * directly; everything else goes through the register window at
 * offset + 0x5600 under indirect_lock. */
401 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
405 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
406 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
407 TG3_64BIT_REG_LOW, val);
410 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
411 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
412 TG3_64BIT_REG_LOW, val);
416 spin_lock_irqsave(&tp->indirect_lock, flags);
417 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
418 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
419 spin_unlock_irqrestore(&tp->indirect_lock, flags);
421 /* In indirect mode when disabling interrupts, we also need
422 * to clear the interrupt bit in the GRC local ctrl register.
424 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
426 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
427 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
431 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
436 spin_lock_irqsave(&tp->indirect_lock, flags);
437 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
438 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
439 spin_unlock_irqrestore(&tp->indirect_lock, flags);
443 /* usec_wait specifies the wait time in usec when writing to certain registers
444 * where it is unsafe to read back the register without some delay.
445 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
446 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
448 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
450 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
451 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
452 /* Non-posted methods */
453 tp->write32(tp, off, val);
/* else: posted write -- tg3_write32 then read back (lines lost). */
456 tg3_write32(tp, off, val);
461 /* Wait again after the read for the posted method to guarantee that
462 * the wait time is met.
/* Write a mailbox and, unless reordering workarounds forbid it, read it
 * back to flush the posted write. */
468 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
470 tp->write32_mbox(tp, off, val);
471 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
472 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
473 tp->read32_mbox(tp, off);
/* TX mailbox write with hardware-bug workarounds (double write / readback
 * flush); the writel/readl bodies of the two ifs were lost in extraction. */
476 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
478 void __iomem *mbox = tp->regs + off;
480 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
482 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
/* Convenience wrappers; all dispatch through the per-chip function
 * pointers installed in struct tg3 ("tp" must be in scope at use site). */
486 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
487 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
488 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
489 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
490 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
492 #define tw32(reg,val) tp->write32(tp, reg, val)
493 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
494 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
495 #define tr32(reg) tp->read32(tp, reg)
/* Access NIC on-board SRAM through the memory window, either via PCI
 * config space (TG3_FLAG_SRAM_USE_CONFIG) or via MMIO window registers.
 * The window base is always restored to 0 afterwards. */
497 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
501 spin_lock_irqsave(&tp->indirect_lock, flags);
502 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
503 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
504 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
506 /* Always leave this as zero. */
507 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
509 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
510 tw32_f(TG3PCI_MEM_WIN_DATA, val);
512 /* Always leave this as zero. */
513 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
515 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* SRAM read counterpart of tg3_write_mem(); result stored through *val. */
518 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
522 spin_lock_irqsave(&tp->indirect_lock, flags);
523 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
524 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
525 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
527 /* Always leave this as zero. */
528 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
530 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
531 *val = tr32(TG3PCI_MEM_WIN_DATA);
533 /* Always leave this as zero. */
534 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
536 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to the interrupt
 * mailbox to disable further interrupt generation. */
539 static void tg3_disable_ints(struct tg3 *tp)
541 tw32(TG3PCI_MISC_HOST_CTRL,
542 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
543 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
/* Force an interrupt via GRC local ctrl if status was updated while
 * interrupts were off (non-tagged-status chips only). */
546 static inline void tg3_cond_int(struct tg3 *tp)
548 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
549 (tp->hw_status->status & SD_STATUS_UPDATED))
550 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
/* Re-enable interrupts: unmask in MISC_HOST_CTRL and write last_tag to
 * the interrupt mailbox (twice for 1-shot MSI chips -- the first write
 * after a 1-shot MSI can be lost, per the 1SHOT_MSI workaround). */
553 static void tg3_enable_ints(struct tg3 *tp)
558 tw32(TG3PCI_MISC_HOST_CTRL,
559 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
560 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561 (tp->last_tag << 24));
562 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
563 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
564 (tp->last_tag << 24));
/* Return nonzero if the status block indicates pending work: a link
 * change (when link changes are interrupt-driven) or new TX completions /
 * RX packets.  NOTE(review): the "work_exists = 1" and return lines were
 * dropped by the extraction. */
568 static inline unsigned int tg3_has_work(struct tg3 *tp)
570 struct tg3_hw_status *sblk = tp->hw_status;
571 unsigned int work_exists = 0;
573 /* check for phy events */
574 if (!(tp->tg3_flags &
575 (TG3_FLAG_USE_LINKCHG_REG |
576 TG3_FLAG_POLL_SERDES))) {
577 if (sblk->status & SD_STATUS_LINK_CHG)
580 /* check for RX/TX work to do */
581 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
582 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
589 * similar to tg3_enable_ints, but it accurately determines whether there
590 * is new work pending and can return without flushing the PIO write
591 * which reenables interrupts
593 static void tg3_restart_ints(struct tg3 *tp)
595 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
599 /* When doing tagged status, this work check is unnecessary.
600 * The last_tag we write above tells the chip which piece of
601 * work we've completed.
603 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
605 tw32(HOSTCC_MODE, tp->coalesce_mode |
606 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Quiesce the network interface: disable NAPI polling and TX queue.
 * trans_start is bumped first so the watchdog doesn't fire a spurious
 * tx_timeout while the queue is deliberately stopped. */
609 static inline void tg3_netif_stop(struct tg3 *tp)
611 tp->dev->trans_start = jiffies; /* prevent tx timeout */
612 netif_poll_disable(tp->dev);
613 netif_tx_disable(tp->dev);
/* Re-enable the interface and force a status-block re-check so no
 * events are lost across the stop/start window. */
616 static inline void tg3_netif_start(struct tg3 *tp)
618 netif_wake_queue(tp->dev);
619 /* NOTE: unconditional netif_wake_queue is only appropriate
620 * so long as all callers are assured to have free tx slots
621 * (such as after tg3_init_hw)
623 netif_poll_enable(tp->dev);
624 tp->hw_status->status |= SD_STATUS_UPDATED;
/* Switch the core clock source, stepping through ALTCLK as required by
 * the hardware when changing the 44MHz core clock.  Each write uses
 * tw32_wait_f with a 40us settle delay (see _tw32_flush comment).
 * NOTE(review): several lines (early return, mask continuation) were
 * dropped by the extraction. */
628 static void tg3_switch_clocks(struct tg3 *tp)
630 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
/* 5780-class chips skip this routine entirely (return line lost). */
633 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
636 orig_clock_ctrl = clock_ctrl;
637 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
638 CLOCK_CTRL_CLKRUN_OENABLE |
640 tp->pci_clock_ctrl = clock_ctrl;
642 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
643 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
644 tw32_wait_f(TG3PCI_CLOCK_CTRL,
645 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
647 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
648 tw32_wait_f(TG3PCI_CLOCK_CTRL,
650 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
652 tw32_wait_f(TG3PCI_CLOCK_CTRL,
653 clock_ctrl | (CLOCK_CTRL_ALTCLK),
656 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Upper bound on MI_COM busy-polling iterations for PHY access. */
659 #define PHY_BUSY_LOOPS 5000
/* Read PHY register 'reg' over the MII management interface into *val.
 * Auto-polling is temporarily disabled around the access and restored
 * afterwards.  Polls MI_COM until MI_COM_BUSY clears (up to
 * PHY_BUSY_LOOPS).  NOTE(review): loop braces, udelay and return lines
 * were dropped by the extraction. */
661 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
667 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
669 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
675 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
676 MI_COM_PHY_ADDR_MASK);
677 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
678 MI_COM_REG_ADDR_MASK);
679 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
681 tw32_f(MAC_MI_COM, frame_val);
683 loops = PHY_BUSY_LOOPS;
686 frame_val = tr32(MAC_MI_COM);
688 if ((frame_val & MI_COM_BUSY) == 0) {
690 frame_val = tr32(MAC_MI_COM);
698 *val = frame_val & MI_COM_DATA_MASK;
/* Restore auto-polling if it was enabled on entry. */
702 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
703 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write 'val' to PHY register 'reg'; same MI_COM busy-wait protocol as
 * tg3_readphy() above. */
710 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
716 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
718 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
722 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723 MI_COM_PHY_ADDR_MASK);
724 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725 MI_COM_REG_ADDR_MASK);
726 frame_val |= (val & MI_COM_DATA_MASK);
727 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
729 tw32_f(MAC_MI_COM, frame_val);
731 loops = PHY_BUSY_LOOPS;
734 frame_val = tr32(MAC_MI_COM);
735 if ((frame_val & MI_COM_BUSY) == 0) {
737 frame_val = tr32(MAC_MI_COM);
747 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
748 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Enable "ethernet@wirespeed" in the PHY aux control register (bits 15
 * and 4) via read-modify-write, unless the chip flags forbid it. */
755 static void tg3_phy_set_wirespeed(struct tg3 *tp)
759 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
762 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
763 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
764 tg3_writephy(tp, MII_TG3_AUX_CTRL,
765 (val | (1 << 15) | (1 << 4)));
/* Software-reset the PHY via BMCR_RESET and poll until the bit
 * self-clears.  NOTE(review): the retry loop wrapper and error/return
 * lines were dropped by the extraction. */
768 static int tg3_bmcr_reset(struct tg3 *tp)
773 /* OK, reset it, and poll the BMCR_RESET bit until it
774 * clears or we time out.
776 phy_control = BMCR_RESET;
777 err = tg3_writephy(tp, MII_BMCR, phy_control);
783 err = tg3_readphy(tp, MII_BMCR, &phy_control);
787 if ((phy_control & BMCR_RESET) == 0) {
/* Poll PHY register 0x16 until bit 12 clears, indicating a DSP macro
 * operation has completed (loop/timeout lines lost in extraction). */
799 static int tg3_wait_macro_done(struct tg3 *tp)
806 if (!tg3_readphy(tp, 0x16, &tmp32)) {
807 if ((tmp32 & 0x1000) == 0)
/* Write a fixed DSP test pattern to each of the 4 PHY channels, read it
 * back, and verify.  On mismatch, requests a PHY reset through *resetp
 * and issues the DSP "reset channel" writes (0x4001/0x4005).
 * NOTE(review): braces, *resetp assignments and return lines were
 * dropped by the extraction; code kept byte-identical. */
817 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
819 static const u32 test_pat[4][6] = {
820 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
821 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
822 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
823 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
827 for (chan = 0; chan < 4; chan++) {
/* Select this channel's DSP block and enter write mode. */
830 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
831 (chan * 0x2000) | 0x0200);
832 tg3_writephy(tp, 0x16, 0x0002);
834 for (i = 0; i < 6; i++)
835 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
838 tg3_writephy(tp, 0x16, 0x0202);
839 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and switch to read-back mode. */
844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
845 (chan * 0x2000) | 0x0200);
846 tg3_writephy(tp, 0x16, 0x0082);
847 if (tg3_wait_macro_done(tp)) {
852 tg3_writephy(tp, 0x16, 0x0802);
853 if (tg3_wait_macro_done(tp)) {
/* Words come back in low/high pairs; compare both against the pattern. */
858 for (i = 0; i < 6; i += 2) {
861 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
862 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
863 tg3_wait_macro_done(tp)) {
869 if (low != test_pat[chan][i] ||
870 high != test_pat[chan][i+1]) {
871 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
872 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
873 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero the DSP pattern registers of all 4 PHY channels (6 words each),
 * waiting for each channel's macro write to complete.  Returns nonzero
 * on macro timeout (error/return lines lost in extraction). */
883 static int tg3_phy_reset_chanpat(struct tg3 *tp)
887 for (chan = 0; chan < 4; chan++) {
890 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
891 (chan * 0x2000) | 0x0200);
892 tg3_writephy(tp, 0x16, 0x0002);
893 for (i = 0; i < 6; i++)
894 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
895 tg3_writephy(tp, 0x16, 0x0202);
896 if (tg3_wait_macro_done(tp))
/* PHY reset workaround sequence for 5703/5704/5705: reset the PHY,
 * force master-mode 1000/full, run the DSP test-pattern check (retrying
 * with a fresh reset on mismatch), then restore the original MII_TG3_CTRL
 * and EXT_CTRL settings.
 *
 * FIX(review): "&reg32" had been corrupted to the mojibake "\u00ae32"
 * (HTML entity &reg swallowed the identifier prefix) in the two
 * tg3_readphy(..., MII_TG3_EXT_CTRL, ...) calls below; restored.
 * NOTE(review): braces, the retry loop wrapper, and error/return lines
 * were dropped by the extraction; all other tokens kept byte-identical. */
903 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
905 u32 reg32, phy9_orig;
906 int retries, do_phy_reset, err;
912 err = tg3_bmcr_reset(tp);
918 /* Disable transmitter and interrupt. */
919 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
923 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
925 /* Set full-duplex, 1000 mbps. */
926 tg3_writephy(tp, MII_BMCR,
927 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
929 /* Set to master mode. */
930 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
933 tg3_writephy(tp, MII_TG3_CTRL,
934 (MII_TG3_CTRL_AS_MASTER |
935 MII_TG3_CTRL_ENABLE_AS_MASTER));
937 /* Enable SM_DSP_CLOCK and 6dB. */
938 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
940 /* Block the PHY control access. */
941 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
942 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
944 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
949 err = tg3_phy_reset_chanpat(tp);
953 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
954 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
956 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
957 tg3_writephy(tp, 0x16, 0x0000);
959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
961 /* Set Extended packet length bit for jumbo frames */
962 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
965 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
968 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
970 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
972 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
979 static void tg3_link_report(struct tg3 *);
981 /* This will reset the tigon3 PHY if there is no valid
982 * link unless the FORCE argument is non-zero.
984 static int tg3_phy_reset(struct tg3 *tp)
/* BMSR is read twice: the first read returns latched status, the second
 * the current state. */
989 err = tg3_readphy(tp, MII_BMSR, &phy_status);
990 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
994 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
995 netif_carrier_off(tp->dev);
/* 5703/5704/5705 need the full workaround sequence; others a plain
 * BMCR reset. */
999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1002 err = tg3_phy_reset_5703_4_5(tp);
1008 err = tg3_bmcr_reset(tp);
/* Per-erratum DSP fixups keyed off the phy-bug flags set at probe time. */
1013 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1014 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1015 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1016 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1019 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1021 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1022 tg3_writephy(tp, 0x1c, 0x8d68);
1023 tg3_writephy(tp, 0x1c, 0x8d68);
1025 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1026 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1027 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1028 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1030 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1031 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1032 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1033 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1035 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1036 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1037 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1038 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1039 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1041 /* Set Extended packet length bit (bit 14) on all chips that */
1042 /* support jumbo frames */
1043 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1044 /* Cannot do read-modify-write on 5401 */
1045 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1046 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1049 /* Set bit 14 with read-modify-write to preserve other bits */
1050 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1051 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1055 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1056 * jumbo frames transmission.
1058 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1061 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1063 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC)
1066 tg3_phy_set_wirespeed(tp);
/* Manage the auxiliary power GPIOs on GRC_LOCAL_CTRL.  On dual-port
 * chips (5704/5714) the peer device's WOL/ASF state is consulted so
 * aux power stays on if either port needs it.  Each GPIO write uses a
 * 100us settle delay (tw32_wait_f).
 * NOTE(review): many lines (returns, else branches, braces) were dropped
 * by the extraction; code kept byte-identical. */
1070 static void tg3_frob_aux_power(struct tg3 *tp)
1072 struct tg3 *tp_peer = tp;
1074 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1077 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1078 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1079 struct net_device *dev_peer;
1081 dev_peer = pci_get_drvdata(tp->pdev_peer);
1082 /* remove_one() may have been run on the peer. */
1086 tp_peer = netdev_priv(dev_peer);
/* Either port needing WOL or ASF keeps the GPIOs driven for aux power. */
1089 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1090 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1091 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1092 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1095 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1096 (GRC_LCLCTRL_GPIO_OE0 |
1097 GRC_LCLCTRL_GPIO_OE1 |
1098 GRC_LCLCTRL_GPIO_OE2 |
1099 GRC_LCLCTRL_GPIO_OUTPUT0 |
1100 GRC_LCLCTRL_GPIO_OUTPUT1),
1104 u32 grc_local_ctrl = 0;
1106 if (tp_peer != tp &&
1107 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1110 /* Workaround to prevent overdrawing Amps. */
1111 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1113 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1114 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1115 grc_local_ctrl, 100);
1118 /* On 5753 and variants, GPIO2 cannot be used. */
1119 no_gpio2 = tp->nic_sram_data_cfg &
1120 NIC_SRAM_DATA_CFG_NO_GPIO2;
1122 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1123 GRC_LCLCTRL_GPIO_OE1 |
1124 GRC_LCLCTRL_GPIO_OE2 |
1125 GRC_LCLCTRL_GPIO_OUTPUT1 |
1126 GRC_LCLCTRL_GPIO_OUTPUT2;
1128 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1129 GRC_LCLCTRL_GPIO_OUTPUT2);
1131 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1132 grc_local_ctrl, 100);
1134 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1136 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1137 grc_local_ctrl, 100);
1140 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1141 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142 grc_local_ctrl, 100);
/* No WOL/ASF: pulse GPIO1 (output high, OE only, output high again). */
1146 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1147 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1148 if (tp_peer != tp &&
1149 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1152 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1153 (GRC_LCLCTRL_GPIO_OE1 |
1154 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1156 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1157 GRC_LCLCTRL_GPIO_OE1, 100);
1159 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1160 (GRC_LCLCTRL_GPIO_OE1 |
1161 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Forward declarations and reset-kind codes used by the power
 * management paths below. */
1166 static int tg3_setup_phy(struct tg3 *, int);
1168 #define RESET_KIND_SHUTDOWN 0
1169 #define RESET_KIND_INIT 1
1170 #define RESET_KIND_SUSPEND 2
1172 static void tg3_write_sig_post_reset(struct tg3 *, int);
1173 static int tg3_halt_cpu(struct tg3 *, u32);
1174 static int tg3_nvram_lock(struct tg3 *);
1175 static void tg3_nvram_unlock(struct tg3 *);
/* Power down the PHY via BMCR_PDOWN, except on chips where that is
 * known to be unsafe (5700, 5704, and 5780 with MII serdes). */
1177 static void tg3_power_down_phy(struct tg3 *tp)
1179 /* The PHY should not be powered down on some chips because
1182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1187 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
/*
 * tg3_set_power_state - transition the device to a new PCI power state.
 * @tp:    device instance
 * @state: target pci_power_t state
 *
 * For a wake state (D0) this restores Vaux/LOM settings and returns early;
 * for sleep states it saves the current link configuration, drops the link
 * to 10/half for WOL, arms magic-packet wakeup, gates the various chip
 * clocks per ASIC revision, powers down the PHY when neither WOL nor ASF
 * needs it, and finally writes the PM control register.
 *
 * NOTE(review): many lines are elided in this extract (declarations of
 * mac_mode/misc_host_ctrl/i/val/err, case labels, returns); comments below
 * only describe what the visible lines establish.
 */
1190 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1193 u16 power_control, power_caps;
1194 int pm = tp->pm_cap;
1196 /* Make sure register accesses (indirect or otherwise)
1197 * will function correctly.
1199 pci_write_config_dword(tp->pdev,
1200 TG3PCI_MISC_HOST_CTRL,
1201 tp->misc_host_ctrl);
1203 pci_read_config_word(tp->pdev,
/* Clear any pending PME status and the power-state field before
 * programming the new state. */
1206 power_control |= PCI_PM_CTRL_PME_STATUS;
1207 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1211 pci_write_config_word(tp->pdev,
1214 udelay(100); /* Delay after power state change */
1216 /* Switch out of Vaux if it is not a LOM */
1217 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1218 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1235 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1237 tp->dev->name, state);
1241 power_control |= PCI_PM_CTRL_PME_ENABLE;
/* Mask PCI interrupts while the chip is being powered down. */
1243 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1244 tw32(TG3PCI_MISC_HOST_CTRL,
1245 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Save the active link parameters so resume can restore them. */
1247 if (tp->link_config.phy_is_low_power == 0) {
1248 tp->link_config.phy_is_low_power = 1;
1249 tp->link_config.orig_speed = tp->link_config.speed;
1250 tp->link_config.orig_duplex = tp->link_config.duplex;
1251 tp->link_config.orig_autoneg = tp->link_config.autoneg;
/* Drop copper links to 10/half for the low-power state; SERDES
 * parts are left alone. */
1254 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1255 tp->link_config.speed = SPEED_10;
1256 tp->link_config.duplex = DUPLEX_HALF;
1257 tp->link_config.autoneg = AUTONEG_ENABLE;
1258 tg3_setup_phy(tp, 0);
1261 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Wait for the firmware mailbox to acknowledge (polled up to
 * 200 iterations). */
1265 for (i = 0; i < 200; i++) {
1266 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1267 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1272 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1273 WOL_DRV_STATE_SHUTDOWN |
1274 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1276 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
/* Wake-on-LAN: keep the MAC alive in MII (copper) or TBI (fiber)
 * mode and enable magic-packet detection if D3cold PME is
 * supported. */
1278 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1281 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1282 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1285 mac_mode = MAC_MODE_PORT_MODE_MII;
1287 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1288 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1289 mac_mode |= MAC_MODE_LINK_POLARITY;
1291 mac_mode = MAC_MODE_PORT_MODE_TBI;
1294 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1295 tw32(MAC_LED_CTRL, tp->led_ctrl);
1297 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1298 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1299 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1301 tw32_f(MAC_MODE, mac_mode);
1304 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: choice of bits depends on ASIC generation. */
1308 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1309 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1313 base_val = tp->pci_clock_ctrl;
1314 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1315 CLOCK_CTRL_TXCLK_DISABLE);
1317 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1318 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1319 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1321 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1322 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1323 u32 newbits1, newbits2;
1325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1327 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1328 CLOCK_CTRL_TXCLK_DISABLE |
1330 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1331 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1332 newbits1 = CLOCK_CTRL_625_CORE;
1333 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1335 newbits1 = CLOCK_CTRL_ALTCLK;
1336 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1339 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1342 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1345 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1350 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1351 CLOCK_CTRL_TXCLK_DISABLE |
1352 CLOCK_CTRL_44MHZ_CORE);
1354 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1357 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1358 tp->pci_clock_ctrl | newbits3, 40);
/* If nothing needs the PHY awake (no WOL, no ASF), force the
 * LEDs off and power the PHY down. */
1362 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1363 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1364 /* Turn off the PHY */
1365 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1366 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1367 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1368 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1369 tg3_power_down_phy(tp);
1373 tg3_frob_aux_power(tp);
1375 /* Workaround for unstable PLL clock */
1376 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1377 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1378 u32 val = tr32(0x7d00);
1380 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1382 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Halting the RX CPU requires the NVRAM lock to serialize
 * against firmware access. */
1385 err = tg3_nvram_lock(tp);
1386 tg3_halt_cpu(tp, RX_CPU_BASE);
1388 tg3_nvram_unlock(tp);
/* Tell the firmware the device is going down. */
1392 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1394 /* Finally, set the new power state. */
1395 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1396 udelay(100); /* Delay after power state change */
/*
 * tg3_link_report - log the current link state to the kernel log.
 *
 * Prints "Link is down" when the carrier is off; otherwise prints the
 * negotiated speed/duplex and the resolved TX/RX flow-control settings
 * taken from tp->tg3_flags.
 */
1401 static void tg3_link_report(struct tg3 *tp)
1403 if (!netif_carrier_ok(tp->dev)) {
1404 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1406 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1408 (tp->link_config.active_speed == SPEED_1000 ?
1410 (tp->link_config.active_speed == SPEED_100 ?
1412 (tp->link_config.active_duplex == DUPLEX_FULL ?
1415 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1418 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
/*
 * tg3_setup_flow_control - resolve pause (flow-control) settings.
 * @tp:         device instance
 * @local_adv:  our pause advertisement (1000BaseT or 1000BaseX bits)
 * @remote_adv: link partner's pause advertisement
 *
 * Implements the IEEE 802.3 pause-resolution table: the combination of
 * PAUSE_CAP / PAUSE_ASYM on both sides decides whether RX and/or TX pause
 * is enabled.  The result is stored in tp->tg3_flags and pushed into the
 * MAC RX/TX mode registers only when the value actually changed.
 */
1423 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1425 u32 new_tg3_flags = 0;
1426 u32 old_rx_mode = tp->rx_mode;
1427 u32 old_tx_mode = tp->tx_mode;
1429 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1431 /* Convert 1000BaseX flow control bits to 1000BaseT
1432 * bits before resolving flow control.
1434 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1435 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1436 ADVERTISE_PAUSE_ASYM);
1437 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1439 if (local_adv & ADVERTISE_1000XPAUSE)
1440 local_adv |= ADVERTISE_PAUSE_CAP;
1441 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1442 local_adv |= ADVERTISE_PAUSE_ASYM;
1443 if (remote_adv & LPA_1000XPAUSE)
1444 remote_adv |= LPA_PAUSE_CAP;
1445 if (remote_adv & LPA_1000XPAUSE_ASYM)
1446 remote_adv |= LPA_PAUSE_ASYM;
/* Standard pause resolution matrix (802.3 Annex 28B). */
1449 if (local_adv & ADVERTISE_PAUSE_CAP) {
1450 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1451 if (remote_adv & LPA_PAUSE_CAP)
1453 (TG3_FLAG_RX_PAUSE |
1455 else if (remote_adv & LPA_PAUSE_ASYM)
1457 (TG3_FLAG_RX_PAUSE);
1459 if (remote_adv & LPA_PAUSE_CAP)
1461 (TG3_FLAG_RX_PAUSE |
1464 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1465 if ((remote_adv & LPA_PAUSE_CAP) &&
1466 (remote_adv & LPA_PAUSE_ASYM))
1467 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1470 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1471 tp->tg3_flags |= new_tg3_flags;
/* Pause autoneg disabled: keep whatever is already configured. */
1473 new_tg3_flags = tp->tg3_flags;
1476 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1477 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1479 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
/* Only touch the hardware when the mode actually changed. */
1481 if (old_rx_mode != tp->rx_mode) {
1482 tw32_f(MAC_RX_MODE, tp->rx_mode);
1485 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1486 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1488 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1490 if (old_tx_mode != tp->tx_mode) {
1491 tw32_f(MAC_TX_MODE, tp->tx_mode);
/*
 * tg3_aux_stat_to_speed_duplex - decode the PHY auxiliary status register.
 * @tp:     device instance (unused in the visible body)
 * @val:    raw MII_TG3_AUX_STAT value
 * @speed:  out: negotiated speed (SPEED_10/100/1000 or SPEED_INVALID)
 * @duplex: out: negotiated duplex (DUPLEX_HALF/FULL or DUPLEX_INVALID)
 *
 * Maps the speed/duplex field of the Broadcom aux-status register to the
 * generic speed/duplex constants; unknown codes yield the INVALID values.
 */
1495 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498 case MII_TG3_AUX_STAT_10HALF:
1500 *duplex = DUPLEX_HALF;
1503 case MII_TG3_AUX_STAT_10FULL:
1505 *duplex = DUPLEX_FULL;
1508 case MII_TG3_AUX_STAT_100HALF:
1510 *duplex = DUPLEX_HALF;
1513 case MII_TG3_AUX_STAT_100FULL:
1515 *duplex = DUPLEX_FULL;
1518 case MII_TG3_AUX_STAT_1000HALF:
1519 *speed = SPEED_1000;
1520 *duplex = DUPLEX_HALF;
1523 case MII_TG3_AUX_STAT_1000FULL:
1524 *speed = SPEED_1000;
1525 *duplex = DUPLEX_FULL;
/* default: report an unresolvable speed/duplex. */
1529 *speed = SPEED_INVALID;
1530 *duplex = DUPLEX_INVALID;
/*
 * tg3_phy_copper_begin - program the copper PHY's advertisement and start
 * autonegotiation (or force a fixed mode).
 *
 * Four configurations are handled:
 *   1. low-power mode: advertise 10Mb only (plus 100Mb if WOL at 100 is
 *      required), gigabit disabled;
 *   2. speed == SPEED_INVALID: advertise everything the chip supports
 *      (gigabit masked off for 10/100-only parts);
 *   3. a specific autoneg advertisement built from link_config;
 *   4. autoneg disabled: force BMCR speed/duplex directly, first looping
 *      the PHY back and waiting for link-down so the forced write takes.
 *
 * NOTE(review): lines are elided in this extract (case labels of the
 * BMCR switch, some else-branches, loop bodies); comments describe only
 * the visible lines.
 */
1535 static void tg3_phy_copper_begin(struct tg3 *tp)
1540 if (tp->link_config.phy_is_low_power) {
1541 /* Entering low power mode. Disable gigabit and
1542 * 100baseT advertisements.
1544 tg3_writephy(tp, MII_TG3_CTRL, 0);
1546 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1547 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1548 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1549 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1551 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552 } else if (tp->link_config.speed == SPEED_INVALID) {
/* No specific speed requested: advertise all modes. */
1553 tp->link_config.advertising =
1554 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1555 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1556 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1557 ADVERTISED_Autoneg | ADVERTISED_MII);
1559 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1560 tp->link_config.advertising &=
1561 ~(ADVERTISED_1000baseT_Half |
1562 ADVERTISED_1000baseT_Full);
1564 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1565 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1566 new_adv |= ADVERTISE_10HALF;
1567 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1568 new_adv |= ADVERTISE_10FULL;
1569 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1570 new_adv |= ADVERTISE_100HALF;
1571 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1572 new_adv |= ADVERTISE_100FULL;
1573 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1575 if (tp->link_config.advertising &
1576 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1578 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1579 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1580 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1581 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 parts must advertise as master (hardware
 * erratum workaround). */
1582 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1583 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1584 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1585 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1586 MII_TG3_CTRL_ENABLE_AS_MASTER);
1587 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1589 tg3_writephy(tp, MII_TG3_CTRL, 0);
1592 /* Asking for a specific link mode. */
1593 if (tp->link_config.speed == SPEED_1000) {
1594 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1597 if (tp->link_config.duplex == DUPLEX_FULL)
1598 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1600 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1601 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1602 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1603 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1604 MII_TG3_CTRL_ENABLE_AS_MASTER);
1605 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1607 tg3_writephy(tp, MII_TG3_CTRL, 0);
1609 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1610 if (tp->link_config.speed == SPEED_100) {
1611 if (tp->link_config.duplex == DUPLEX_FULL)
1612 new_adv |= ADVERTISE_100FULL;
1614 new_adv |= ADVERTISE_100HALF;
1616 if (tp->link_config.duplex == DUPLEX_FULL)
1617 new_adv |= ADVERTISE_10FULL;
1619 new_adv |= ADVERTISE_10HALF;
1621 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Forced mode: write BMCR directly instead of autonegotiating. */
1625 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1626 tp->link_config.speed != SPEED_INVALID) {
1627 u32 bmcr, orig_bmcr;
1629 tp->link_config.active_speed = tp->link_config.speed;
1630 tp->link_config.active_duplex = tp->link_config.duplex;
1633 switch (tp->link_config.speed) {
1639 bmcr |= BMCR_SPEED100;
1643 bmcr |= TG3_BMCR_SPEED1000;
1647 if (tp->link_config.duplex == DUPLEX_FULL)
1648 bmcr |= BMCR_FULLDPLX;
1650 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1651 (bmcr != orig_bmcr)) {
/* Loop the PHY back and wait (up to 1500 polls) for the
 * link to drop before writing the new forced BMCR. */
1652 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1653 for (i = 0; i < 1500; i++) {
1657 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1658 tg3_readphy(tp, MII_BMSR, &tmp))
1660 if (!(tmp & BMSR_LSTATUS)) {
1665 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg path: kick off (re)negotiation. */
1669 tg3_writephy(tp, MII_BMCR,
1670 BMCR_ANENABLE | BMCR_ANRESTART);
/*
 * tg3_init_5401phy_dsp - program BCM5401 PHY DSP workaround values.
 *
 * Disables tap power management and sets the extended-packet-length bit
 * via a sequence of DSP address/data register writes.  Errors from the
 * individual writes are OR-ed together; returns non-zero if any write
 * failed (final return is elided in this extract).
 */
1674 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1678 /* Turn off tap power management. */
1679 /* Set Extended packet length bit */
1680 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1682 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1683 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1685 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1686 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1688 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1689 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1691 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1692 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1694 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1695 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/*
 * tg3_copper_is_advertising_all - check whether the PHY advertisement
 * registers still contain the full set of modes the driver expects.
 *
 * Reads MII_ADVERTISE (10/100 bits) and, unless the part is 10/100-only,
 * MII_TG3_CTRL (gigabit bits), and fails if any expected bit is missing.
 * Used to detect advertisement clobbering so autoneg can be restarted.
 * Returns 0 when anything is missing (success return elided here).
 */
1702 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1704 u32 adv_reg, all_mask;
1706 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1709 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710 ADVERTISE_100HALF | ADVERTISE_100FULL);
1711 if ((adv_reg & all_mask) != all_mask)
1713 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1716 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1719 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720 MII_TG3_CTRL_ADV_1000_FULL);
1721 if ((tg3_ctrl & all_mask) != all_mask)
/*
 * tg3_setup_copper_phy - bring up / re-check the copper link.
 * @tp:          device instance
 * @force_reset: request a PHY reset before (re)configuring
 *
 * Clears stale MAC status, applies per-chip PHY errata (BCM5401 DSP init,
 * 5701 A0/B0 CRC workaround), clears PHY interrupts, polls BMSR/aux-status
 * to determine the current link state, validates the negotiated pause
 * configuration, restarts autoneg via tg3_phy_copper_begin() when the link
 * is down or stale, programs MAC_MODE to match the result, and finally
 * updates the kernel carrier state and logs via tg3_link_report().
 *
 * NOTE(review): many lines are elided in this extract (declarations of
 * bmsr/bmcr/i/err/dummy/val/current_speed/current_duplex, several
 * else/return/break lines); comments describe only the visible code.
 */
1727 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1729 int current_link_up;
/* Clear stale status-changed bits before probing the link. */
1738 (MAC_STATUS_SYNC_CHANGED |
1739 MAC_STATUS_CFG_CHANGED |
1740 MAC_STATUS_MI_COMPLETION |
1741 MAC_STATUS_LNKSTATE_CHANGED));
1744 tp->mi_mode = MAC_MI_MODE_BASE;
1745 tw32_f(MAC_MI_MODE, tp->mi_mode);
1748 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1750 /* Some third-party PHYs need to be reset on link going
1753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1756 netif_carrier_ok(tp->dev)) {
/* BMSR is latched; read twice to get the current state. */
1757 tg3_readphy(tp, MII_BMSR, &bmsr);
1758 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759 !(bmsr & BMSR_LSTATUS))
/* BCM5401 erratum handling: re-run the DSP init sequence when
 * the link is down, and reset the PHY on B0 parts stuck at
 * gigabit without link. */
1765 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1766 tg3_readphy(tp, MII_BMSR, &bmsr);
1767 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1768 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1771 if (!(bmsr & BMSR_LSTATUS)) {
1772 err = tg3_init_5401phy_dsp(tp);
1776 tg3_readphy(tp, MII_BMSR, &bmsr);
1777 for (i = 0; i < 1000; i++) {
1779 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1780 (bmsr & BMSR_LSTATUS)) {
1786 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1787 !(bmsr & BMSR_LSTATUS) &&
1788 tp->link_config.active_speed == SPEED_1000) {
1789 err = tg3_phy_reset(tp);
1791 err = tg3_init_5401phy_dsp(tp);
1796 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1797 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1798 /* 5701 {A0,B0} CRC bug workaround */
1799 tg3_writephy(tp, 0x15, 0x0a75);
1800 tg3_writephy(tp, 0x1c, 0x8c68);
1801 tg3_writephy(tp, 0x1c, 0x8d68);
1802 tg3_writephy(tp, 0x1c, 0x8c68);
1805 /* Clear pending interrupts... */
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1809 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1810 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1812 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1816 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1820 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1823 current_link_up = 0;
1824 current_speed = SPEED_INVALID;
1825 current_duplex = DUPLEX_INVALID;
1827 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1830 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1831 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1832 if (!(val & (1 << 10))) {
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll for link (latched BMSR read twice per iteration). */
1840 for (i = 0; i < 100; i++) {
1841 tg3_readphy(tp, MII_BMSR, &bmsr);
1842 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1843 (bmsr & BMSR_LSTATUS))
1848 if (bmsr & BMSR_LSTATUS) {
/* Link is up: read negotiated speed/duplex from aux status. */
1851 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1852 for (i = 0; i < 2000; i++) {
1854 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1859 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1864 for (i = 0; i < 200; i++) {
1865 tg3_readphy(tp, MII_BMCR, &bmcr);
1866 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1868 if (bmcr && bmcr != 0x7fff)
1873 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1874 if (bmcr & BMCR_ANENABLE) {
1875 current_link_up = 1;
1877 /* Force autoneg restart if we are exiting
1880 if (!tg3_copper_is_advertising_all(tp))
1881 current_link_up = 0;
1883 current_link_up = 0;
/* Forced mode only counts as "up" if it matches config. */
1886 if (!(bmcr & BMCR_ANENABLE) &&
1887 tp->link_config.speed == current_speed &&
1888 tp->link_config.duplex == current_duplex) {
1889 current_link_up = 1;
1891 current_link_up = 0;
1895 tp->link_config.active_speed = current_speed;
1896 tp->link_config.active_duplex = current_duplex;
/* Full-duplex autoneg link: verify pause advertisement and
 * resolve flow control with the partner. */
1899 if (current_link_up == 1 &&
1900 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1901 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1902 u32 local_adv, remote_adv;
1904 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1906 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1908 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1911 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1913 /* If we are not advertising full pause capability,
1914 * something is wrong. Bring the link down and reconfigure.
1916 if (local_adv != ADVERTISE_PAUSE_CAP) {
1917 current_link_up = 0;
1919 tg3_setup_flow_control(tp, local_adv, remote_adv);
/* Link down or low-power: reprogram the PHY and re-check. */
1923 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1926 tg3_phy_copper_begin(tp);
1928 tg3_readphy(tp, MII_BMSR, &tmp);
1929 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1930 (tmp & BMSR_LSTATUS))
1931 current_link_up = 1;
/* Program MAC port mode (MII for 10/100, GMII otherwise). */
1934 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1935 if (current_link_up == 1) {
1936 if (tp->link_config.active_speed == SPEED_100 ||
1937 tp->link_config.active_speed == SPEED_10)
1938 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1940 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1942 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1944 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1945 if (tp->link_config.active_duplex == DUPLEX_HALF)
1946 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1948 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1950 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1951 (current_link_up == 1 &&
1952 tp->link_config.active_speed == SPEED_10))
1953 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1955 if (current_link_up == 1)
1956 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1959 /* ??? Without this setting Netgear GA302T PHY does not
1960 * ??? send/receive packets...
1962 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1963 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1964 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1965 tw32_f(MAC_MI_MODE, tp->mi_mode);
1969 tw32_f(MAC_MODE, tp->mac_mode);
1972 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1973 /* Polled via timer. */
1974 tw32_f(MAC_EVENT, 0);
1976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X / fast PCI needs an extra firmware
 * mailbox handshake (workaround). */
1980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1981 current_link_up == 1 &&
1982 tp->link_config.active_speed == SPEED_1000 &&
1983 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1984 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1987 (MAC_STATUS_SYNC_CHANGED |
1988 MAC_STATUS_CFG_CHANGED));
1991 NIC_SRAM_FIRMWARE_MBOX,
1992 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
/* Propagate the link state to the stack and log changes. */
1995 if (current_link_up != netif_carrier_ok(tp->dev)) {
1996 if (current_link_up)
1997 netif_carrier_on(tp->dev);
1999 netif_carrier_off(tp->dev);
2000 tg3_link_report(tp);
/*
 * State block for the software 1000BaseX autonegotiation state machine
 * (tg3_fiber_aneg_smachine), used for fiber parts without hardware
 * autoneg.  The ANEG_STATE_* values mirror the IEEE 802.3 Clause 37
 * arbitration states; MR_* flags are the management-register style
 * inputs/outputs; ANEG_CFG_* decode the received/transmitted config
 * code word.
 */
2006 struct tg3_fiber_aneginfo {
2008 #define ANEG_STATE_UNKNOWN 0
2009 #define ANEG_STATE_AN_ENABLE 1
2010 #define ANEG_STATE_RESTART_INIT 2
2011 #define ANEG_STATE_RESTART 3
2012 #define ANEG_STATE_DISABLE_LINK_OK 4
2013 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2014 #define ANEG_STATE_ABILITY_DETECT 6
2015 #define ANEG_STATE_ACK_DETECT_INIT 7
2016 #define ANEG_STATE_ACK_DETECT 8
2017 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2018 #define ANEG_STATE_COMPLETE_ACK 10
2019 #define ANEG_STATE_IDLE_DETECT_INIT 11
2020 #define ANEG_STATE_IDLE_DETECT 12
2021 #define ANEG_STATE_LINK_OK 13
2022 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2023 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2026 #define MR_AN_ENABLE 0x00000001
2027 #define MR_RESTART_AN 0x00000002
2028 #define MR_AN_COMPLETE 0x00000004
2029 #define MR_PAGE_RX 0x00000008
2030 #define MR_NP_LOADED 0x00000010
2031 #define MR_TOGGLE_TX 0x00000020
2032 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2033 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2034 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2035 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2036 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2037 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2038 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2039 #define MR_TOGGLE_RX 0x00002000
2040 #define MR_NP_RX 0x00004000
2042 #define MR_LINK_OK 0x80000000
2044 unsigned long link_time, cur_time;
2046 u32 ability_match_cfg;
2047 int ability_match_count;
2049 char ability_match, idle_match, ack_match;
2051 u32 txconfig, rxconfig;
2052 #define ANEG_CFG_NP 0x00000080
2053 #define ANEG_CFG_ACK 0x00000040
2054 #define ANEG_CFG_RF2 0x00000020
2055 #define ANEG_CFG_RF1 0x00000010
2056 #define ANEG_CFG_PS2 0x00000001
2057 #define ANEG_CFG_PS1 0x00008000
2058 #define ANEG_CFG_HD 0x00004000
2059 #define ANEG_CFG_FD 0x00002000
2060 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes for the state machine; SETTLE_TIME is in the same
 * units as ap->cur_time ticks. */
2065 #define ANEG_TIMER_ENAB 2
2066 #define ANEG_FAILED -1
2068 #define ANEG_STATE_SETTLE_TIME 10000
/*
 * tg3_fiber_aneg_smachine - one step of the software Clause-37 1000BaseX
 * autonegotiation state machine.
 * @tp: device instance
 * @ap: state/flags block, persistent across calls
 *
 * Samples the received config word from MAC_RX_AUTO_NEG (debounced via
 * ability_match_count), then dispatches on ap->state: restarts, ability
 * detect, ack detect, complete-ack (decoding the partner's advertised
 * abilities into MR_LP_ADV_* flags), idle detect, and link-ok.  Returns
 * ANEG_TIMER_ENAB / ANEG_DONE / ANEG_FAILED (several assignments and the
 * final return are elided in this extract).
 */
2070 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2071 struct tg3_fiber_aneginfo *ap)
2073 unsigned long delta;
2077 if (ap->state == ANEG_STATE_UNKNOWN) {
2081 ap->ability_match_cfg = 0;
2082 ap->ability_match_count = 0;
2083 ap->ability_match = 0;
/* Sample the incoming config word; a value must repeat before it
 * is accepted (debounce). */
2089 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2090 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2092 if (rx_cfg_reg != ap->ability_match_cfg) {
2093 ap->ability_match_cfg = rx_cfg_reg;
2094 ap->ability_match = 0;
2095 ap->ability_match_count = 0;
2097 if (++ap->ability_match_count > 1) {
2098 ap->ability_match = 1;
2099 ap->ability_match_cfg = rx_cfg_reg;
2102 if (rx_cfg_reg & ANEG_CFG_ACK)
2110 ap->ability_match_cfg = 0;
2111 ap->ability_match_count = 0;
2112 ap->ability_match = 0;
2118 ap->rxconfig = rx_cfg_reg;
2122 case ANEG_STATE_UNKNOWN:
2123 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2124 ap->state = ANEG_STATE_AN_ENABLE;
2127 case ANEG_STATE_AN_ENABLE:
2128 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2129 if (ap->flags & MR_AN_ENABLE) {
2132 ap->ability_match_cfg = 0;
2133 ap->ability_match_count = 0;
2134 ap->ability_match = 0;
2138 ap->state = ANEG_STATE_RESTART_INIT;
2140 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2144 case ANEG_STATE_RESTART_INIT:
2145 ap->link_time = ap->cur_time;
2146 ap->flags &= ~(MR_NP_LOADED);
/* Transmit a null config word while restarting. */
2148 tw32(MAC_TX_AUTO_NEG, 0);
2149 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150 tw32_f(MAC_MODE, tp->mac_mode);
2153 ret = ANEG_TIMER_ENAB;
2154 ap->state = ANEG_STATE_RESTART;
2157 case ANEG_STATE_RESTART:
2158 delta = ap->cur_time - ap->link_time;
2159 if (delta > ANEG_STATE_SETTLE_TIME) {
2160 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2162 ret = ANEG_TIMER_ENAB;
2166 case ANEG_STATE_DISABLE_LINK_OK:
2170 case ANEG_STATE_ABILITY_DETECT_INIT:
2171 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full-duplex plus symmetric pause. */
2172 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2173 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2174 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2175 tw32_f(MAC_MODE, tp->mac_mode);
2178 ap->state = ANEG_STATE_ABILITY_DETECT;
2181 case ANEG_STATE_ABILITY_DETECT:
2182 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2183 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2187 case ANEG_STATE_ACK_DETECT_INIT:
2188 ap->txconfig |= ANEG_CFG_ACK;
2189 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2190 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2191 tw32_f(MAC_MODE, tp->mac_mode);
2194 ap->state = ANEG_STATE_ACK_DETECT;
2197 case ANEG_STATE_ACK_DETECT:
2198 if (ap->ack_match != 0) {
2199 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2200 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2201 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2203 ap->state = ANEG_STATE_AN_ENABLE;
2205 } else if (ap->ability_match != 0 &&
2206 ap->rxconfig == 0) {
2207 ap->state = ANEG_STATE_AN_ENABLE;
2211 case ANEG_STATE_COMPLETE_ACK_INIT:
2212 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the link partner's config word into MR_LP_ADV_* flags. */
2216 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2217 MR_LP_ADV_HALF_DUPLEX |
2218 MR_LP_ADV_SYM_PAUSE |
2219 MR_LP_ADV_ASYM_PAUSE |
2220 MR_LP_ADV_REMOTE_FAULT1 |
2221 MR_LP_ADV_REMOTE_FAULT2 |
2222 MR_LP_ADV_NEXT_PAGE |
2225 if (ap->rxconfig & ANEG_CFG_FD)
2226 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2227 if (ap->rxconfig & ANEG_CFG_HD)
2228 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2229 if (ap->rxconfig & ANEG_CFG_PS1)
2230 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2231 if (ap->rxconfig & ANEG_CFG_PS2)
2232 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2233 if (ap->rxconfig & ANEG_CFG_RF1)
2234 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2235 if (ap->rxconfig & ANEG_CFG_RF2)
2236 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2237 if (ap->rxconfig & ANEG_CFG_NP)
2238 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2240 ap->link_time = ap->cur_time;
2242 ap->flags ^= (MR_TOGGLE_TX);
2243 if (ap->rxconfig & 0x0008)
2244 ap->flags |= MR_TOGGLE_RX;
2245 if (ap->rxconfig & ANEG_CFG_NP)
2246 ap->flags |= MR_NP_RX;
2247 ap->flags |= MR_PAGE_RX;
2249 ap->state = ANEG_STATE_COMPLETE_ACK;
2250 ret = ANEG_TIMER_ENAB;
2253 case ANEG_STATE_COMPLETE_ACK:
2254 if (ap->ability_match != 0 &&
2255 ap->rxconfig == 0) {
2256 ap->state = ANEG_STATE_AN_ENABLE;
2259 delta = ap->cur_time - ap->link_time;
2260 if (delta > ANEG_STATE_SETTLE_TIME) {
2261 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2262 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2264 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2265 !(ap->flags & MR_NP_RX)) {
2266 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2274 case ANEG_STATE_IDLE_DETECT_INIT:
2275 ap->link_time = ap->cur_time;
/* Stop transmitting config words; switch to idle. */
2276 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2277 tw32_f(MAC_MODE, tp->mac_mode);
2280 ap->state = ANEG_STATE_IDLE_DETECT;
2281 ret = ANEG_TIMER_ENAB;
2284 case ANEG_STATE_IDLE_DETECT:
2285 if (ap->ability_match != 0 &&
2286 ap->rxconfig == 0) {
2287 ap->state = ANEG_STATE_AN_ENABLE;
2290 delta = ap->cur_time - ap->link_time;
2291 if (delta > ANEG_STATE_SETTLE_TIME) {
2292 /* XXX another gem from the Broadcom driver :( */
2293 ap->state = ANEG_STATE_LINK_OK;
2297 case ANEG_STATE_LINK_OK:
2298 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2302 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2303 /* ??? unimplemented */
2306 case ANEG_STATE_NEXT_PAGE_WAIT:
2307 /* ??? unimplemented */
/*
 * fiber_autoneg - run software autonegotiation to completion.
 * @tp:    device instance
 * @flags: out: final MR_* flags from the state machine
 *
 * Primes the MAC to send config words, then drives
 * tg3_fiber_aneg_smachine() for up to 195000 ticks until it reports
 * ANEG_DONE or ANEG_FAILED.  Succeeds only if the machine completed with
 * link OK and a full-duplex partner (success return elided here).
 */
2318 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2321 struct tg3_fiber_aneginfo aninfo;
2322 int status = ANEG_FAILED;
2326 tw32_f(MAC_TX_AUTO_NEG, 0);
2328 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2332 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2335 memset(&aninfo, 0, sizeof(aninfo));
2336 aninfo.flags |= MR_AN_ENABLE;
2337 aninfo.state = ANEG_STATE_UNKNOWN;
2338 aninfo.cur_time = 0;
2340 while (++tick < 195000) {
2341 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342 if (status == ANEG_DONE || status == ANEG_FAILED)
/* Stop sending config words once negotiation ends. */
2348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349 tw32_f(MAC_MODE, tp->mac_mode);
2352 *flags = aninfo.flags;
2354 if (status == ANEG_DONE &&
2355 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356 MR_LP_ADV_FULL_DUPLEX)))
/*
 * tg3_init_bcm8002 - initialize the BCM8002 SERDES transceiver.
 *
 * Performs the vendor-prescribed register sequence: set PLL lock range,
 * reset, wait, select the PMA/Ch-1 register bank, enable auto-lock and
 * comdet, pulse POR, wait for the signal to stabilize, then deselect the
 * channel bank.  Skipped when already initialized and synced (guard at
 * top; its early return is elided in this extract).  Register numbers
 * and values are opaque vendor magic.
 */
2362 static void tg3_init_bcm8002(struct tg3 *tp)
2364 u32 mac_status = tr32(MAC_STATUS);
2367 /* Reset when initting first time or we have a link. */
2368 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2369 !(mac_status & MAC_STATUS_PCS_SYNCED))
2372 /* Set PLL lock range. */
2373 tg3_writephy(tp, 0x16, 0x8007);
2376 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2378 /* Wait for reset to complete. */
2379 /* XXX schedule_timeout() ... */
2380 for (i = 0; i < 500; i++)
2383 /* Config mode; select PMA/Ch 1 regs. */
2384 tg3_writephy(tp, 0x10, 0x8411);
2386 /* Enable auto-lock and comdet, select txclk for tx. */
2387 tg3_writephy(tp, 0x11, 0x0a10);
2389 tg3_writephy(tp, 0x18, 0x00a0);
2390 tg3_writephy(tp, 0x16, 0x41ff);
2392 /* Assert and deassert POR. */
2393 tg3_writephy(tp, 0x13, 0x0400);
2395 tg3_writephy(tp, 0x13, 0x0000);
2397 tg3_writephy(tp, 0x11, 0x0a50);
2399 tg3_writephy(tp, 0x11, 0x0a10);
2401 /* Wait for signal to stabilize */
2402 /* XXX schedule_timeout() ... */
2403 for (i = 0; i < 15000; i++)
2406 /* Deselect the channel register so we can read the PHYID
2409 tg3_writephy(tp, 0x10, 0x8011);
/*
 * tg3_setup_fiber_hw_autoneg - fiber link setup using the SG_DIG hardware
 * autonegotiation block.
 * @tp:         device instance
 * @mac_status: current MAC_STATUS snapshot
 *
 * Forced mode: clears the SG_DIG autoneg enable (bit 31) and, with a
 * 5704-A0/A1 serdes workaround, declares link up when PCS is synced.
 * Autoneg mode: programs the expected SG_DIG_CTRL value (pause bits 11/12),
 * polls SG_DIG_STATUS for ~200ms, resolves flow control on completion, and
 * falls back to parallel detection (PCS synced, no config words received)
 * when autoneg does not complete.  Returns 1 if the link is up, else 0.
 *
 * NOTE(review): lines are elided (port_a computation, some writes and
 * breaks); comments describe only the visible code.
 */
2412 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2414 u32 sg_dig_ctrl, sg_dig_status;
2415 u32 serdes_cfg, expected_sg_dig_ctrl;
2416 int workaround, port_a;
2417 int current_link_up;
2420 expected_sg_dig_ctrl = 0;
2423 current_link_up = 0;
/* The serdes-cfg workaround applies to 5704 revisions after A0/A1. */
2425 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2426 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2428 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2431 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2432 /* preserve bits 20-23 for voltage regulator */
2433 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2436 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2438 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
/* Forced mode: disable hardware autoneg if it was on. */
2439 if (sg_dig_ctrl & (1 << 31)) {
2441 u32 val = serdes_cfg;
2447 tw32_f(MAC_SERDES_CFG, val);
2449 tw32_f(SG_DIG_CTRL, 0x01388400);
2451 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2452 tg3_setup_flow_control(tp, 0, 0);
2453 current_link_up = 1;
2458 /* Want auto-negotiation. */
2459 expected_sg_dig_ctrl = 0x81388400;
2461 /* Pause capability */
2462 expected_sg_dig_ctrl |= (1 << 11);
2464 /* Asymettric pause */
2465 expected_sg_dig_ctrl |= (1 << 12);
2467 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2469 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
/* Write with bit 30 (SW reset) first, then the final value. */
2470 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2472 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2474 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2475 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2476 MAC_STATUS_SIGNAL_DET)) {
2479 /* Giver time to negotiate (~200ms) */
2480 for (i = 0; i < 40000; i++) {
2481 sg_dig_status = tr32(SG_DIG_STATUS);
2482 if (sg_dig_status & (0x3))
2486 mac_status = tr32(MAC_STATUS);
/* Autoneg completed (bit 1) with PCS sync: resolve pause. */
2488 if ((sg_dig_status & (1 << 1)) &&
2489 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2490 u32 local_adv, remote_adv;
2492 local_adv = ADVERTISE_PAUSE_CAP;
2494 if (sg_dig_status & (1 << 19))
2495 remote_adv |= LPA_PAUSE_CAP;
2496 if (sg_dig_status & (1 << 20))
2497 remote_adv |= LPA_PAUSE_ASYM;
2499 tg3_setup_flow_control(tp, local_adv, remote_adv);
2500 current_link_up = 1;
2501 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2502 } else if (!(sg_dig_status & (1 << 1))) {
2503 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2504 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2507 u32 val = serdes_cfg;
2514 tw32_f(MAC_SERDES_CFG, val);
2517 tw32_f(SG_DIG_CTRL, 0x01388400);
2520 /* Link parallel detection - link is up */
2521 /* only if we have PCS_SYNC and not */
2522 /* receiving config code words */
2523 mac_status = tr32(MAC_STATUS);
2524 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2525 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2526 tg3_setup_flow_control(tp, 0, 0);
2527 current_link_up = 1;
2534 return current_link_up;
/*
 * tg3_setup_fiber_by_hand - fiber link setup without the SG_DIG block,
 * using the software autoneg state machine (fiber_autoneg).
 * @tp:         device instance
 * @mac_status: current MAC_STATUS snapshot
 *
 * Requires PCS sync to proceed.  With autoneg enabled it runs
 * fiber_autoneg(), resolves flow control from the returned MR_LP_ADV_*
 * flags, then waits for the SYNC/CFG-changed status bits to settle; the
 * link also counts as up via parallel detection (PCS synced, no config
 * words).  With autoneg disabled it simply forces 1000FD up.  Returns 1
 * if the link is up, else 0.
 */
2537 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2539 int current_link_up = 0;
2541 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2542 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2546 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2550 if (fiber_autoneg(tp, &flags)) {
2551 u32 local_adv, remote_adv;
2553 local_adv = ADVERTISE_PAUSE_CAP;
2555 if (flags & MR_LP_ADV_SYM_PAUSE)
2556 remote_adv |= LPA_PAUSE_CAP;
2557 if (flags & MR_LP_ADV_ASYM_PAUSE)
2558 remote_adv |= LPA_PAUSE_ASYM;
2560 tg3_setup_flow_control(tp, local_adv, remote_adv);
2562 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2563 current_link_up = 1;
/* Wait (up to 30 iterations) for the sync/config changed
 * bits to stop toggling. */
2565 for (i = 0; i < 30; i++) {
2568 (MAC_STATUS_SYNC_CHANGED |
2569 MAC_STATUS_CFG_CHANGED));
2571 if ((tr32(MAC_STATUS) &
2572 (MAC_STATUS_SYNC_CHANGED |
2573 MAC_STATUS_CFG_CHANGED)) == 0)
2577 mac_status = tr32(MAC_STATUS);
2578 if (current_link_up == 0 &&
2579 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2580 !(mac_status & MAC_STATUS_RCVD_CFG))
2581 current_link_up = 1;
2583 /* Forcing 1000FD link up. */
2584 current_link_up = 1;
2585 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2587 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2592 return current_link_up;
/*
 * tg3_setup_fiber_phy(): top-level link setup for fiber (TBI) ports.
 * Snapshots the current pause/speed/duplex config, reprograms the MAC
 * for TBI mode, runs either hardware or by-hand autoneg, then updates
 * carrier state, LEDs and reports link changes.
 *
 * NOTE(review): this extract has gaps (missing source lines); comments
 * cover only what is visible.
 */
2595 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2598 u16 orig_active_speed;
2599 u8 orig_active_duplex;
2601 int current_link_up;
2605 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2606 TG3_FLAG_TX_PAUSE));
/* Remember pre-setup state so we can report only real changes below. */
2607 orig_active_speed = tp->link_config.active_speed;
2608 orig_active_duplex = tp->link_config.active_duplex;
/* If the link already looks clean (synced + signal, no cfg traffic)
 * while not using HW autoneg, just ACK the changed bits.
 */
2610 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2611 netif_carrier_ok(tp->dev) &&
2612 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2613 mac_status = tr32(MAC_STATUS);
2614 mac_status &= (MAC_STATUS_PCS_SYNCED |
2615 MAC_STATUS_SIGNAL_DET |
2616 MAC_STATUS_CFG_CHANGED |
2617 MAC_STATUS_RCVD_CFG);
2618 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2619 MAC_STATUS_SIGNAL_DET)) {
2620 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2621 MAC_STATUS_CFG_CHANGED));
2626 tw32_f(MAC_TX_AUTO_NEG, 0);
/* Switch the MAC port mode to TBI (fiber). */
2628 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2629 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2630 tw32_f(MAC_MODE, tp->mac_mode);
2633 if (tp->phy_id == PHY_ID_BCM8002)
2634 tg3_init_bcm8002(tp);
2636 /* Enable link change event even when serdes polling. */
2637 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2640 current_link_up = 0;
2641 mac_status = tr32(MAC_STATUS);
/* Pick hardware or software autoneg path. */
2643 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2644 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2646 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2648 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2649 tw32_f(MAC_MODE, tp->mac_mode);
/* Clear the link-changed bit in the shared status block. */
2652 tp->hw_status->status =
2653 (SD_STATUS_UPDATED |
2654 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2656 for (i = 0; i < 100; i++) {
2657 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2658 MAC_STATUS_CFG_CHANGED));
2660 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2661 MAC_STATUS_CFG_CHANGED)) == 0)
2665 mac_status = tr32(MAC_STATUS);
/* No PCS sync after all: link is down; if autonegotiating, keep
 * sending config words so the peer can complete.
 */
2666 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2667 current_link_up = 0;
2668 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2669 tw32_f(MAC_MODE, (tp->mac_mode |
2670 MAC_MODE_SEND_CONFIGS));
2672 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber links only come up as 1000/full; drive the LEDs to match. */
2676 if (current_link_up == 1) {
2677 tp->link_config.active_speed = SPEED_1000;
2678 tp->link_config.active_duplex = DUPLEX_FULL;
2679 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2680 LED_CTRL_LNKLED_OVERRIDE |
2681 LED_CTRL_1000MBPS_ON));
2683 tp->link_config.active_speed = SPEED_INVALID;
2684 tp->link_config.active_duplex = DUPLEX_INVALID;
2685 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2686 LED_CTRL_LNKLED_OVERRIDE |
2687 LED_CTRL_TRAFFIC_OVERRIDE));
/* Propagate carrier state to the net stack and log the transition. */
2690 if (current_link_up != netif_carrier_ok(tp->dev)) {
2691 if (current_link_up)
2692 netif_carrier_on(tp->dev);
2694 netif_carrier_off(tp->dev);
2695 tg3_link_report(tp);
2698 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
/* Also report when pause/speed/duplex changed without a carrier flip. */
2700 if (orig_pause_cfg != now_pause_cfg ||
2701 orig_active_speed != tp->link_config.active_speed ||
2702 orig_active_duplex != tp->link_config.active_duplex)
2703 tg3_link_report(tp);
/*
 * tg3_setup_fiber_mii_phy(): link setup for fiber ports driven through
 * an MII-attached SERDES PHY (e.g. 5714-class parts).  Handles both
 * autoneg (1000Base-X ADVERTISE bits) and forced mode, then syncs MAC
 * duplex and carrier state.  Returns an error accumulator from the
 * MII reads (0 on success).
 *
 * NOTE(review): some original lines are missing from this extract.
 */
2709 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2711 int current_link_up, err = 0;
2716 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2717 tw32_f(MAC_MODE, tp->mac_mode);
2723 (MAC_STATUS_SYNC_CHANGED |
2724 MAC_STATUS_CFG_CHANGED |
2725 MAC_STATUS_MI_COMPLETION |
2726 MAC_STATUS_LNKSTATE_CHANGED));
2732 current_link_up = 0;
2733 current_speed = SPEED_INVALID;
2734 current_duplex = DUPLEX_INVALID;
/* BMSR latches link-down; read twice to get the current state. */
2736 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* On 5714 the PHY's BMSR link bit is unreliable; trust MAC_TX_STATUS. */
2738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2739 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2740 bmsr |= BMSR_LSTATUS;
2742 bmsr &= ~BMSR_LSTATUS;
2745 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2747 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2748 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2749 /* do nothing, just check for link up at the end */
2750 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Build the 1000Base-X advertisement word from link_config. */
2753 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2754 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2755 ADVERTISE_1000XPAUSE |
2756 ADVERTISE_1000XPSE_ASYM |
2759 /* Always advertise symmetric PAUSE just like copper */
2760 new_adv |= ADVERTISE_1000XPAUSE;
2762 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2763 new_adv |= ADVERTISE_1000XHALF;
2764 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2765 new_adv |= ADVERTISE_1000XFULL;
/* Only (re)start autoneg when the advertisement actually changed. */
2767 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2768 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2769 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2770 tg3_writephy(tp, MII_BMCR, bmcr);
2772 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2773 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2774 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* Forced mode: compute the desired BMCR (no autoneg, chosen duplex). */
2781 bmcr &= ~BMCR_SPEED1000;
2782 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2784 if (tp->link_config.duplex == DUPLEX_FULL)
2785 new_bmcr |= BMCR_FULLDPLX;
2787 if (new_bmcr != bmcr) {
2788 /* BMCR_SPEED1000 is a reserved bit that needs
2789 * to be set on write.
2791 new_bmcr |= BMCR_SPEED1000;
2793 /* Force a linkdown */
2794 if (netif_carrier_ok(tp->dev)) {
/* Drop all 1000X advertisement bits and restart autoneg so the
 * peer sees the link go down before we force the new mode.
 */
2797 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2798 adv &= ~(ADVERTISE_1000XFULL |
2799 ADVERTISE_1000XHALF |
2801 tg3_writephy(tp, MII_ADVERTISE, adv);
2802 tg3_writephy(tp, MII_BMCR, bmcr |
2806 netif_carrier_off(tp->dev);
2808 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-sample link state (double-read; 5714 override as above). */
2810 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2812 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2814 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2815 bmsr |= BMSR_LSTATUS;
2817 bmsr &= ~BMSR_LSTATUS;
2819 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2823 if (bmsr & BMSR_LSTATUS) {
2824 current_speed = SPEED_1000;
2825 current_link_up = 1;
2826 if (bmcr & BMCR_FULLDPLX)
2827 current_duplex = DUPLEX_FULL;
2829 current_duplex = DUPLEX_HALF;
/* With autoneg on, derive duplex/flow-control from the common
 * (local AND partner) advertised abilities.
 */
2831 if (bmcr & BMCR_ANENABLE) {
2832 u32 local_adv, remote_adv, common;
2834 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2835 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2836 common = local_adv & remote_adv;
2837 if (common & (ADVERTISE_1000XHALF |
2838 ADVERTISE_1000XFULL)) {
2839 if (common & ADVERTISE_1000XFULL)
2840 current_duplex = DUPLEX_FULL;
2842 current_duplex = DUPLEX_HALF;
2844 tg3_setup_flow_control(tp, local_adv,
/* No common ability: link cannot be considered up. */
2848 current_link_up = 0;
/* Program the MAC's duplex to match the negotiated result. */
2852 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2853 if (tp->link_config.active_duplex == DUPLEX_HALF)
2854 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2856 tw32_f(MAC_MODE, tp->mac_mode);
2859 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2861 tp->link_config.active_speed = current_speed;
2862 tp->link_config.active_duplex = current_duplex;
2864 if (current_link_up != netif_carrier_ok(tp->dev)) {
2865 if (current_link_up)
2866 netif_carrier_on(tp->dev);
2868 netif_carrier_off(tp->dev);
2869 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2871 tg3_link_report(tp);
/*
 * tg3_serdes_parallel_detect(): periodic helper that detects a link
 * partner which does not autonegotiate.  If autoneg has had time to
 * finish and the PHY reports signal-detect without incoming config
 * code words, force 1000FD ("parallel detect").  Conversely, if config
 * words reappear on a parallel-detected link, re-enable autoneg.
 */
2876 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2878 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2879 /* Give autoneg time to complete. */
2880 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2883 if (!netif_carrier_ok(tp->dev) &&
2884 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2887 tg3_readphy(tp, MII_BMCR, &bmcr);
2888 if (bmcr & BMCR_ANENABLE) {
2891 /* Select shadow register 0x1f */
2892 tg3_writephy(tp, 0x1c, 0x7c00);
2893 tg3_readphy(tp, 0x1c, &phy1);
2895 /* Select expansion interrupt status register */
2896 tg3_writephy(tp, 0x17, 0x0f01);
/* Double-read: the expansion status register is latched. */
2897 tg3_readphy(tp, 0x15, &phy2);
2898 tg3_readphy(tp, 0x15, &phy2);
2900 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2901 /* We have signal detect and not receiving
2902 * config code words, link is up by parallel
/* Force 1000 Mb/s full duplex and remember we parallel-detected. */
2906 bmcr &= ~BMCR_ANENABLE;
2907 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2908 tg3_writephy(tp, MII_BMCR, bmcr);
2909 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2913 else if (netif_carrier_ok(tp->dev) &&
2914 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2915 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2918 /* Select expansion interrupt status register */
2919 tg3_writephy(tp, 0x17, 0x0f01);
2920 tg3_readphy(tp, 0x15, &phy2);
2924 /* Config code words received, turn on autoneg. */
2925 tg3_readphy(tp, MII_BMCR, &bmcr);
2926 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2928 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/*
 * tg3_setup_phy(): dispatch link setup to the fiber, fiber-MII or
 * copper implementation, then program MAC TX timing (longer slot time
 * for 1000/half) and statistics coalescing.  Returns the selected
 * setup routine's error code.
 */
2934 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2938 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2939 err = tg3_setup_fiber_phy(tp, force_reset);
2940 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2941 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2943 err = tg3_setup_copper_phy(tp, force_reset);
/* 1000/half needs an extended slot time (0xff) vs the normal 32. */
2946 if (tp->link_config.active_speed == SPEED_1000 &&
2947 tp->link_config.active_duplex == DUPLEX_HALF)
2948 tw32(MAC_TX_LENGTHS,
2949 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2950 (6 << TX_LENGTHS_IPG_SHIFT) |
2951 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2953 tw32(MAC_TX_LENGTHS,
2954 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2955 (6 << TX_LENGTHS_IPG_SHIFT) |
2956 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
/* Pre-5705 chips: only coalesce statistics while the link is up. */
2958 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2959 if (netif_carrier_ok(tp->dev)) {
2960 tw32(HOSTCC_STAT_COAL_TICKS,
2961 tp->coal.stats_block_coalesce_usecs);
2963 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2970 /* This is called whenever we suspect that the system chipset is re-
2971 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2972 * is bogus tx completions. We try to recover by setting the
2973 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
/*
 * tg3_tx_recover(): invoked when bogus TX completions suggest the host
 * chipset is reordering MMIO writes to the TX mailbox.  Warns the user
 * and flags the device so the reset task switches to flushed mailbox
 * writes.  The BUG_ON asserts the workaround is not already active.
 */
2976 static void tg3_tx_recover(struct tg3 *tp)
2978 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2979 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2981 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2982 "mapped I/O cycles to the network device, attempting to "
2983 "recover. Please report the problem to the driver maintainer "
2984 "and include system chipset information.\n", tp->dev->name);
/* Set the recovery flag under tp->lock; the reset task acts on it. */
2986 spin_lock(&tp->lock);
2987 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2988 spin_unlock(&tp->lock);
2991 /* Tigon3 never reports partial packet sends. So we do not
2992 * need special logic to handle SKBs that have not had all
2993 * of their frags sent yet, like SunGEM does.
/*
 * tg3_tx(): reap completed TX descriptors between the software consumer
 * index and the hardware's reported consumer index, unmapping DMA for
 * the head fragment and each page fragment, then wake the queue if
 * enough descriptors freed up.
 *
 * NOTE(review): lines initializing/handling tx_bug and freeing the skb
 * are missing from this extract (numbering gaps).
 */
2995 static void tg3_tx(struct tg3 *tp)
2997 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2998 u32 sw_idx = tp->tx_cons;
3000 while (sw_idx != hw_idx) {
3001 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3002 struct sk_buff *skb = ri->skb;
/* A NULL skb here means the ring state is corrupt. */
3005 if (unlikely(skb == NULL)) {
3010 pci_unmap_single(tp->pdev,
3011 pci_unmap_addr(ri, mapping),
3017 sw_idx = NEXT_TX(sw_idx);
/* Walk and unmap each page fragment of this skb. */
3019 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3020 ri = &tp->tx_buffers[sw_idx];
3021 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3024 pci_unmap_page(tp->pdev,
3025 pci_unmap_addr(ri, mapping),
3026 skb_shinfo(skb)->frags[i].size,
3029 sw_idx = NEXT_TX(sw_idx);
3034 if (unlikely(tx_bug)) {
3040 tp->tx_cons = sw_idx;
/* Re-check under tx_lock to avoid racing a concurrent stop. */
3042 if (unlikely(netif_queue_stopped(tp->dev))) {
3043 spin_lock(&tp->tx_lock);
3044 if (netif_queue_stopped(tp->dev) &&
3045 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3046 netif_wake_queue(tp->dev);
3047 spin_unlock(&tp->tx_lock);
3051 /* Returns size of skb allocated or < 0 on error.
3053 * We only need to fill in the address because the other members
3054 * of the RX descriptor are invariant, see tg3_init_rings.
3056 * Note the purposeful assymetry of cpu vs. chip accesses. For
3057 * posting buffers we only dirty the first cache line of the RX
3058 * descriptor (containing the address). Whereas for the RX status
3059 * buffers the cpu only reads the last cacheline of the RX descriptor
3060 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/*
 * tg3_alloc_rx_skb(): allocate and DMA-map a fresh receive buffer for
 * the standard or jumbo producer ring and write its address into the
 * ring descriptor.  Returns the allocated skb size or a negative error
 * (error paths are in lines missing from this extract).
 */
3062 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3063 int src_idx, u32 dest_idx_unmasked)
3065 struct tg3_rx_buffer_desc *desc;
3066 struct ring_info *map, *src_map;
3067 struct sk_buff *skb;
3069 int skb_size, dest_idx;
/* Pick ring-specific descriptor, bookkeeping slot and buffer size. */
3072 switch (opaque_key) {
3073 case RXD_OPAQUE_RING_STD:
3074 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3075 desc = &tp->rx_std[dest_idx];
3076 map = &tp->rx_std_buffers[dest_idx];
3078 src_map = &tp->rx_std_buffers[src_idx];
3079 skb_size = tp->rx_pkt_buf_sz;
3082 case RXD_OPAQUE_RING_JUMBO:
3083 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3084 desc = &tp->rx_jumbo[dest_idx];
3085 map = &tp->rx_jumbo_buffers[dest_idx];
3087 src_map = &tp->rx_jumbo_buffers[src_idx];
3088 skb_size = RX_JUMBO_PKT_BUF_SZ;
3095 /* Do not overwrite any of the map or rp information
3096 * until we are sure we can commit to a new buffer.
3098 * Callers depend upon this behavior and assume that
3099 * we leave everything unchanged if we fail.
3101 skb = dev_alloc_skb(skb_size);
3106 skb_reserve(skb, tp->rx_offset);
3108 mapping = pci_map_single(tp->pdev, skb->data,
3109 skb_size - tp->rx_offset,
3110 PCI_DMA_FROMDEVICE);
3113 pci_unmap_addr_set(map, mapping, mapping);
3115 if (src_map != NULL)
3116 src_map->skb = NULL;
/* Publish the 64-bit DMA address to the hardware descriptor. */
3118 desc->addr_hi = ((u64)mapping >> 32);
3119 desc->addr_lo = ((u64)mapping & 0xffffffff);
3124 /* We only need to move over in the address because the other
3125 * members of the RX descriptor are invariant. See notes above
3126 * tg3_alloc_rx_skb for full details.
/*
 * tg3_recycle_rx(): repost an existing RX buffer (skb, DMA mapping and
 * descriptor address) from src_idx to dest_idx on the same ring,
 * avoiding a fresh allocation.  Only the address fields are copied;
 * the other descriptor members are invariant (see tg3_alloc_rx_skb).
 */
3128 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3129 int src_idx, u32 dest_idx_unmasked)
3131 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3132 struct ring_info *src_map, *dest_map;
3135 switch (opaque_key) {
3136 case RXD_OPAQUE_RING_STD:
3137 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3138 dest_desc = &tp->rx_std[dest_idx];
3139 dest_map = &tp->rx_std_buffers[dest_idx];
3140 src_desc = &tp->rx_std[src_idx];
3141 src_map = &tp->rx_std_buffers[src_idx];
3144 case RXD_OPAQUE_RING_JUMBO:
3145 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3146 dest_desc = &tp->rx_jumbo[dest_idx];
3147 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3148 src_desc = &tp->rx_jumbo[src_idx];
3149 src_map = &tp->rx_jumbo_buffers[src_idx];
/* Move ownership of skb + mapping to the destination slot. */
3156 dest_map->skb = src_map->skb;
3157 pci_unmap_addr_set(dest_map, mapping,
3158 pci_unmap_addr(src_map, mapping));
3159 dest_desc->addr_hi = src_desc->addr_hi;
3160 dest_desc->addr_lo = src_desc->addr_lo;
/* Source slot no longer owns the buffer. */
3162 src_map->skb = NULL;
3165 #if TG3_VLAN_TAG_USED
/* Hand a received skb with its hardware-extracted VLAN tag to the
 * VLAN acceleration path.  Compiled only when 802.1Q support is on.
 */
3166 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3168 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3172 /* The RX ring scheme is composed of multiple rings which post fresh
3173 * buffers to the chip, and one special ring the chip uses to report
3174 * status back to the host.
3176 * The special ring reports the status of received packets to the
3177 * host. The chip does not write into the original descriptor the
3178 * RX buffer was obtained from. The chip simply takes the original
3179 * descriptor as provided by the host, updates the status and length
3180 * field, then writes this into the next status ring entry.
3182 * Each ring the host uses to post buffers to the chip is described
3183 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3184 * it is first placed into the on-chip ram. When the packet's length
3185 * is known, it walks down the TG3_BDINFO entries to select the ring.
3186 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3187 * which is within the range of the new packet's length is chosen.
3189 * The "separate ring for rx status" scheme may sound queer, but it makes
3190 * sense from a cache coherency perspective. If only the host writes
3191 * to the buffer post rings, and only the chip writes to the rx status
3192 * rings, then cache lines never move beyond shared-modified state.
3193 * If both the host and chip were to write into the same ring, cache line
3194 * eviction could occur since both entities want it in an exclusive state.
/*
 * tg3_rx(): NAPI receive processing.  Walks the RX return (status)
 * ring up to 'budget' packets: large frames get a fresh buffer posted
 * and the filled skb passed up; small frames are copied into a new skb
 * and the original buffer recycled.  Finally ACKs the return ring and
 * refills the standard/jumbo producer rings that were consumed.
 * Returns the number of packets processed (received count maintained
 * in lines missing from this extract).
 */
3196 static int tg3_rx(struct tg3 *tp, int budget)
3198 u32 work_mask, rx_std_posted = 0;
3199 u32 sw_idx = tp->rx_rcb_ptr;
3203 hw_idx = tp->hw_status->idx[0].rx_producer;
3205 * We need to order the read of hw_idx and the read of
3206 * the opaque cookie.
3211 while (sw_idx != hw_idx && budget > 0) {
3212 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3214 struct sk_buff *skb;
3215 dma_addr_t dma_addr;
3216 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie tells us which producer ring this buffer
 * came from and at what index.
 */
3218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3220 if (opaque_key == RXD_OPAQUE_RING_STD) {
3221 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3223 skb = tp->rx_std_buffers[desc_idx].skb;
3224 post_ptr = &tp->rx_std_ptr;
3226 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3227 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3229 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3230 post_ptr = &tp->rx_jumbo_ptr;
3233 goto next_pkt_nopost;
3236 work_mask |= opaque_key;
/* Hardware-flagged errors: recycle the buffer and drop. */
3238 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3239 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3241 tg3_recycle_rx(tp, opaque_key,
3242 desc_idx, *post_ptr);
3244 /* Other statistics kept track of by card. */
3245 tp->net_stats.rx_dropped++;
3249 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
/* Large frame: replace the ring buffer and pass this one up. */
3251 if (len > RX_COPY_THRESHOLD
3252 && tp->rx_offset == 2
3253 /* rx_offset != 2 iff this is a 5701 card running
3254 * in PCI-X mode [see tg3_get_invariants()] */
3258 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3259 desc_idx, *post_ptr);
3263 pci_unmap_single(tp->pdev, dma_addr,
3264 skb_size - tp->rx_offset,
3265 PCI_DMA_FROMDEVICE);
/* Small frame: copy into a fresh skb, recycle the original. */
3269 struct sk_buff *copy_skb;
3271 tg3_recycle_rx(tp, opaque_key,
3272 desc_idx, *post_ptr);
3274 copy_skb = dev_alloc_skb(len + 2);
3275 if (copy_skb == NULL)
3276 goto drop_it_no_recycle;
3278 copy_skb->dev = tp->dev;
3279 skb_reserve(copy_skb, 2);
3280 skb_put(copy_skb, len);
3281 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3282 memcpy(copy_skb->data, skb->data, len);
3283 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3285 /* We'll reuse the original ring buffer. */
/* Mark checksum-verified only if the chip computed 0xffff. */
3289 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3290 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3291 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3292 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3293 skb->ip_summed = CHECKSUM_UNNECESSARY;
3295 skb->ip_summed = CHECKSUM_NONE;
3297 skb->protocol = eth_type_trans(skb, tp->dev);
3298 #if TG3_VLAN_TAG_USED
3299 if (tp->vlgrp != NULL &&
3300 desc->type_flags & RXD_FLAG_VLAN) {
3301 tg3_vlan_rx(tp, skb,
3302 desc->err_vlan & RXD_VLAN_MASK);
3305 netif_receive_skb(skb);
3307 tp->dev->last_rx = jiffies;
/* Periodically kick the std producer mailbox mid-loop so the
 * chip never starves for buffers during a long burst.
 */
3314 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3315 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3317 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3318 TG3_64BIT_REG_LOW, idx);
3319 work_mask &= ~RXD_OPAQUE_RING_STD;
3324 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3326 /* Refresh hw_idx to see if there is new work */
3327 if (sw_idx == hw_idx) {
3328 hw_idx = tp->hw_status->idx[0].rx_producer;
3333 /* ACK the status ring. */
3334 tp->rx_rcb_ptr = sw_idx;
3335 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3337 /* Refill RX ring(s). */
3338 if (work_mask & RXD_OPAQUE_RING_STD) {
3339 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3340 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3343 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3344 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3345 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/*
 * tg3_poll(): NAPI poll callback.  Handles link-change events, reaps TX
 * completions (aborting into the reset task if TX recovery is pending),
 * processes RX within the NAPI budget, then either re-arms interrupts
 * (done) or asks to be polled again.  Returns 0 when done, 1 otherwise.
 */
3353 static int tg3_poll(struct net_device *netdev, int *budget)
3355 struct tg3 *tp = netdev_priv(netdev);
3356 struct tg3_hw_status *sblk = tp->hw_status;
3359 /* handle link change and other phy events */
3360 if (!(tp->tg3_flags &
3361 (TG3_FLAG_USE_LINKCHG_REG |
3362 TG3_FLAG_POLL_SERDES))) {
3363 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-changed bit before re-running PHY setup. */
3364 sblk->status = SD_STATUS_UPDATED |
3365 (sblk->status & ~SD_STATUS_LINK_CHG);
3366 spin_lock(&tp->lock);
3367 tg3_setup_phy(tp, 0);
3368 spin_unlock(&tp->lock);
3372 /* run TX completion thread */
3373 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
/* TX recovery pending: stop polling and let the reset task run. */
3375 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3376 netif_rx_complete(netdev);
3377 schedule_work(&tp->reset_task);
3382 /* run RX thread, within the bounds set by NAPI.
3383 * All RX "locking" is done by ensuring outside
3384 * code synchronizes with dev->poll()
3386 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3387 int orig_budget = *budget;
/* Respect the per-device quota as well as the global budget. */
3390 if (orig_budget > netdev->quota)
3391 orig_budget = netdev->quota;
3393 work_done = tg3_rx(tp, orig_budget);
3395 *budget -= work_done;
3396 netdev->quota -= work_done;
/* Tagged-status chips track the tag; others clear UPDATED. */
3399 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3400 tp->last_tag = sblk->status_tag;
3403 sblk->status &= ~SD_STATUS_UPDATED;
3405 /* if no more work, tell net stack and NIC we're done */
3406 done = !tg3_has_work(tp);
3408 netif_rx_complete(netdev);
3409 tg3_restart_ints(tp);
3412 return (done ? 0 : 1);
/*
 * tg3_irq_quiesce(): mark the driver as IRQ-synchronized and wait for
 * any in-flight interrupt handler to finish.  Must not be nested
 * (BUG_ON).  The line setting tp->irq_sync is missing from this
 * extract but is implied by the BUG_ON/tg3_irq_sync() pairing.
 */
3415 static void tg3_irq_quiesce(struct tg3 *tp)
3417 BUG_ON(tp->irq_sync);
3422 synchronize_irq(tp->pdev->irq);
/* Returns nonzero while interrupts are quiesced (see tg3_irq_quiesce);
 * the ISRs check this to avoid scheduling NAPI during a shutdown.
 */
3425 static inline int tg3_irq_sync(struct tg3 *tp)
3427 return tp->irq_sync;
3430 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3431 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3432 * with as well. Most of the time, this is not necessary except when
3433 * shutting down the device.
/* Take the full device lock; when irq_sync is set, first quiesce the
 * IRQ handler (needed when shutting the device down).
 */
3435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3438 tg3_irq_quiesce(tp);
3439 spin_lock_bh(&tp->lock);
/* Release the full device lock taken by tg3_full_lock(). */
3442 static inline void tg3_full_unlock(struct tg3 *tp)
3444 spin_unlock_bh(&tp->lock);
3447 /* One-shot MSI handler - Chip automatically disables interrupt
3448 * after sending MSI so driver doesn't have to do it.
/*
 * tg3_msi_1shot(): ISR for one-shot MSI — the chip disables its own
 * interrupt after sending the MSI, so no mailbox write is needed here;
 * just prefetch the hot status/ring data and kick NAPI.
 */
3450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3452 struct net_device *dev = dev_id;
3453 struct tg3 *tp = netdev_priv(dev);
3455 prefetch(tp->hw_status);
3456 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
/* Skip scheduling if interrupts are being quiesced. */
3458 if (likely(!tg3_irq_sync(tp)))
3459 netif_rx_schedule(dev); /* schedule NAPI poll */
3464 /* MSI ISR - No need to check for interrupt sharing and no need to
3465 * flush status block and interrupt mailbox. PCI ordering rules
3466 * guarantee that MSI will arrive after the status block.
/*
 * tg3_msi(): MSI ISR.  MSI is never shared and PCI ordering guarantees
 * the status block is visible before the MSI arrives, so no status
 * flush or sharing check is needed; ACK the mailbox and kick NAPI.
 * Always reports the interrupt as handled.
 */
3468 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3470 struct net_device *dev = dev_id;
3471 struct tg3 *tp = netdev_priv(dev);
3473 prefetch(tp->hw_status);
3474 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3476 * Writing any value to intr-mbox-0 clears PCI INTA# and
3477 * chip-internal interrupt pending events.
3478 * Writing non-zero to intr-mbox-0 additional tells the
3479 * NIC to stop sending us irqs, engaging "in-intr-handler"
3482 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3483 if (likely(!tg3_irq_sync(tp)))
3484 netif_rx_schedule(dev); /* schedule NAPI poll */
3486 return IRQ_RETVAL(1);
/*
 * tg3_interrupt(): legacy (INTx, possibly shared) ISR.  Confirms the
 * interrupt is ours via the status block or PCI state register, masks
 * further chip interrupts through the mailbox, and schedules NAPI if
 * there is work; otherwise re-enables interrupts (possible shared-IRQ
 * false hit).
 */
3489 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3491 struct net_device *dev = dev_id;
3492 struct tg3 *tp = netdev_priv(dev);
3493 struct tg3_hw_status *sblk = tp->hw_status;
3494 unsigned int handled = 1;
3496 /* In INTx mode, it is possible for the interrupt to arrive at
3497 * the CPU before the status block posted prior to the interrupt.
3498 * Reading the PCI State register will confirm whether the
3499 * interrupt is ours and will flush the status block.
3501 if ((sblk->status & SD_STATUS_UPDATED) ||
3502 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3504 * Writing any value to intr-mbox-0 clears PCI INTA# and
3505 * chip-internal interrupt pending events.
3506 * Writing non-zero to intr-mbox-0 additional tells the
3507 * NIC to stop sending us irqs, engaging "in-intr-handler"
3510 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3512 if (tg3_irq_sync(tp))
3514 sblk->status &= ~SD_STATUS_UPDATED;
3515 if (likely(tg3_has_work(tp))) {
3516 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3517 netif_rx_schedule(dev); /* schedule NAPI poll */
3519 /* No work, shared interrupt perhaps? re-enable
3520 * interrupts, and flush that PCI write
3522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3525 } else { /* shared interrupt */
3529 return IRQ_RETVAL(handled);
/*
 * tg3_interrupt_tagged(): legacy ISR variant for chips using tagged
 * status blocks — "new work" is detected by comparing the status tag
 * against the last one the driver consumed, instead of the UPDATED
 * bit.  last_tag is only advanced when NAPI scheduling succeeds, to
 * avoid racing a concurrently running tg3_poll() on a shared IRQ.
 */
3532 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3534 struct net_device *dev = dev_id;
3535 struct tg3 *tp = netdev_priv(dev);
3536 struct tg3_hw_status *sblk = tp->hw_status;
3537 unsigned int handled = 1;
3539 /* In INTx mode, it is possible for the interrupt to arrive at
3540 * the CPU before the status block posted prior to the interrupt.
3541 * Reading the PCI State register will confirm whether the
3542 * interrupt is ours and will flush the status block.
3544 if ((sblk->status_tag != tp->last_tag) ||
3545 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3547 * writing any value to intr-mbox-0 clears PCI INTA# and
3548 * chip-internal interrupt pending events.
3549 * writing non-zero to intr-mbox-0 additional tells the
3550 * NIC to stop sending us irqs, engaging "in-intr-handler"
3553 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3555 if (tg3_irq_sync(tp))
3557 if (netif_rx_schedule_prep(dev)) {
3558 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3559 /* Update last_tag to mark that this status has been
3560 * seen. Because interrupt may be shared, we may be
3561 * racing with tg3_poll(), so only update last_tag
3562 * if tg3_poll() is not scheduled.
3564 tp->last_tag = sblk->status_tag;
3565 __netif_rx_schedule(dev);
3567 } else { /* shared interrupt */
3571 return IRQ_RETVAL(handled);
3574 /* ISR for interrupt test */
/*
 * tg3_test_isr(): minimal ISR installed only during the self-test
 * interrupt check.  ACKs the mailbox and reports whether the interrupt
 * was ours; does no NAPI scheduling or packet work.
 */
3575 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3576 struct pt_regs *regs)
3578 struct net_device *dev = dev_id;
3579 struct tg3 *tp = netdev_priv(dev);
3580 struct tg3_hw_status *sblk = tp->hw_status;
3582 if ((sblk->status & SD_STATUS_UPDATED) ||
3583 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3584 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3586 return IRQ_RETVAL(1);
3588 return IRQ_RETVAL(0);
3591 static int tg3_init_hw(struct tg3 *, int);
3592 static int tg3_halt(struct tg3 *, int, int);
3594 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the normal INTx ISR by hand so netconsole etc.
 * can make progress with interrupts disabled.
 */
3595 static void tg3_poll_controller(struct net_device *dev)
3597 struct tg3 *tp = netdev_priv(dev);
3599 tg3_interrupt(tp->pdev->irq, dev, NULL);
/*
 * tg3_reset_task(): workqueue handler that performs a full chip reset
 * outside interrupt context.  If TX recovery was requested (suspected
 * MMIO write reordering), switches the mailbox accessors to flushed
 * variants before halting and reinitializing the chip.
 *
 * NOTE(review): some lines (e.g. tg3_netif_stop, tg3_init_hw call and
 * the restart_timer guard) are missing from this extract.
 */
3603 static void tg3_reset_task(void *_data)
3605 struct tg3 *tp = _data;
3606 unsigned int restart_timer;
3608 tg3_full_lock(tp, 0);
3609 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
/* Device already down: nothing to reset. */
3611 if (!netif_running(tp->dev)) {
3612 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3613 tg3_full_unlock(tp);
3617 tg3_full_unlock(tp);
/* Re-acquire with IRQ quiesce for the actual reset. */
3621 tg3_full_lock(tp, 1);
3623 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3624 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
/* Apply the MMIO write-reorder workaround requested by
 * tg3_tx_recover(): use flushed mailbox writes from now on.
 */
3626 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3627 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3628 tp->write32_rx_mbox = tg3_write_flush_reg32;
3629 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3630 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3633 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3636 tg3_netif_start(tp);
3639 mod_timer(&tp->timer, jiffies + 1);
3641 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3643 tg3_full_unlock(tp);
/* Net-stack TX watchdog callback: log the hang and defer the actual
 * chip reset to the workqueue (tg3_reset_task).  The printk's
 * "%s" argument line is missing from this extract.
 */
3646 static void tg3_tx_timeout(struct net_device *dev)
3648 struct tg3 *tp = netdev_priv(dev);
3650 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3653 schedule_work(&tp->reset_task);
3656 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3657 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3659 u32 base = (u32) mapping & 0xffffffff;
3661 return ((base > 0xffffdcc0) &&
3662 (base + len + 8 < base));
3665 /* Test for DMA addresses > 40-bit */
/* Return nonzero when mapping+len exceeds the 40-bit DMA limit on
 * chips with the 40-bit DMA bug; only compiled in on 64-bit highmem
 * configs.  The #else/#endif lines are missing from this extract.
 */
3666 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3669 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3670 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3671 return (((u64) mapping + len) > DMA_40BIT_MASK);
3678 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3680 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/*
 * tigon3_dma_hwbug_workaround(): when an skb's fragments hit the 4GB
 * or 40-bit DMA hardware bugs, linearize it into a freshly-allocated
 * skb, remap it, emit a single TX descriptor, and release the old
 * per-fragment mappings/ring slots.  Return-value setup lines are
 * missing from this extract.
 */
3681 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3682 u32 last_plus_one, u32 *start,
3683 u32 base_flags, u32 mss)
3685 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3686 dma_addr_t new_addr = 0;
3693 /* New SKB is guaranteed to be linear. */
3695 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3697 /* Make sure new skb does not cross any 4G boundaries.
3698 * Drop the packet if it does.
3700 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3702 dev_kfree_skb(new_skb);
3705 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3706 base_flags, 1 | (mss << 1));
3707 *start = NEXT_TX(entry);
3711 /* Now clean up the sw ring entries. */
3713 while (entry != last_plus_one) {
/* First slot holds the linear head; the rest are page frags. */
3717 len = skb_headlen(skb);
3719 len = skb_shinfo(skb)->frags[i-1].size;
3720 pci_unmap_single(tp->pdev,
3721 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3722 len, PCI_DMA_TODEVICE);
/* First slot takes ownership of the new skb; others are cleared. */
3724 tp->tx_buffers[entry].skb = new_skb;
3725 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3727 tp->tx_buffers[entry].skb = NULL;
3729 entry = NEXT_TX(entry);
/*
 * tg3_set_txd(): fill one hardware TX descriptor.  The low bit of
 * mss_and_is_end marks the last descriptor of a packet; the remaining
 * bits carry the TSO MSS.  The vlan_tag initialization and is_end
 * guard lines are missing from this extract.
 */
3738 static void tg3_set_txd(struct tg3 *tp, int entry,
3739 dma_addr_t mapping, int len, u32 flags,
3742 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3743 int is_end = (mss_and_is_end & 0x1);
3744 u32 mss = (mss_and_is_end >> 1);
3748 flags |= TXD_FLAG_END;
/* VLAN tag travels in the upper 16 bits of the flags argument. */
3749 if (flags & TXD_FLAG_VLAN) {
3750 vlan_tag = flags >> 16;
3753 vlan_tag |= (mss << TXD_MSS_SHIFT);
/* Split the 64-bit DMA address across the two descriptor words. */
3755 txd->addr_hi = ((u64) mapping >> 32);
3756 txd->addr_lo = ((u64) mapping & 0xffffffff);
3757 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3758 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3761 /* hard_start_xmit for devices that don't have any bugs and
3762 * support TG3_FLG2_HW_TSO_2 only.
/*
 * tg3_start_xmit(): fast-path hard_start_xmit for bug-free chips with
 * TG3_FLG2_HW_TSO_2.  Checks ring space, computes TSO/checksum/VLAN
 * base flags, maps and queues the head plus all page fragments, then
 * kicks the TX mailbox and stops the queue when nearly full.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 *
 * NOTE(review): several lines (base_flags/mss init, error paths) are
 * missing from this extract.
 */
3764 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3766 struct tg3 *tp = netdev_priv(dev);
3768 u32 len, entry, base_flags, mss;
3770 len = skb_headlen(skb);
3772 /* We are running in BH disabled context with netif_tx_lock
3773 * and TX reclaim runs via tp->poll inside of a software
3774 * interrupt. Furthermore, IRQ processing runs lockless so we have
3775 * no IRQ context deadlocks to worry about either. Rejoice!
/* Not enough descriptors: stop the queue.  Hitting this with the
 * queue already awake indicates a flow-control bug.
 */
3777 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3778 if (!netif_queue_stopped(dev)) {
3779 netif_stop_queue(dev);
3781 /* This is a hard error, log it. */
3782 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3783 "queue awake!\n", dev->name);
3785 return NETDEV_TX_BUSY;
3788 entry = tp->tx_prod;
3790 #if TG3_TSO_SUPPORT != 0
/* TSO: prime IP/TCP headers for hardware segmentation and encode
 * the header length alongside the MSS.
 */
3792 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3793 (mss = skb_shinfo(skb)->gso_size) != 0) {
3794 int tcp_opt_len, ip_tcp_len;
3796 if (skb_header_cloned(skb) &&
3797 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3802 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3803 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3805 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3806 TXD_FLAG_CPU_POST_DMA);
/* Hardware recomputes these; zero them out first. */
3808 skb->nh.iph->check = 0;
3809 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3811 skb->h.th->check = 0;
3813 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3815 else if (skb->ip_summed == CHECKSUM_HW)
3816 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3819 if (skb->ip_summed == CHECKSUM_HW)
3820 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3822 #if TG3_VLAN_TAG_USED
3823 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3824 base_flags |= (TXD_FLAG_VLAN |
3825 (vlan_tx_tag_get(skb) << 16));
3828 /* Queue skb data, a.k.a. the main skb fragment. */
3829 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3831 tp->tx_buffers[entry].skb = skb;
3832 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
/* Low bit of the last arg marks "end of packet" when no frags. */
3834 tg3_set_txd(tp, entry, mapping, len, base_flags,
3835 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3837 entry = NEXT_TX(entry);
3839 /* Now loop through additional data fragments, and queue them. */
3840 if (skb_shinfo(skb)->nr_frags > 0) {
3841 unsigned int i, last;
3843 last = skb_shinfo(skb)->nr_frags - 1;
3844 for (i = 0; i <= last; i++) {
3845 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3848 mapping = pci_map_page(tp->pdev,
3851 len, PCI_DMA_TODEVICE);
/* Only the head slot owns the skb pointer. */
3853 tp->tx_buffers[entry].skb = NULL;
3854 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3856 tg3_set_txd(tp, entry, mapping, len,
3857 base_flags, (i == last) | (mss << 1));
3859 entry = NEXT_TX(entry);
3863 /* Packets are ready, update Tx producer idx local and on card. */
3864 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3866 tp->tx_prod = entry;
/* Nearly full: stop the queue, but re-wake if space appeared
 * between the check and taking tx_lock.
 */
3867 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3868 spin_lock(&tp->tx_lock);
3869 netif_stop_queue(dev);
3870 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3871 netif_wake_queue(tp->dev);
3872 spin_unlock(&tp->tx_lock);
3878 dev->trans_start = jiffies;
3880 return NETDEV_TX_OK;
3883 #if TG3_TSO_SUPPORT != 0
3884 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3886 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3887 * TSO header is greater than 80 bytes.
3889 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
/* Falls back to software segmentation (GSO) and queues each resulting
 * segment individually via tg3_start_xmit_dma_bug().  Returns
 * NETDEV_TX_BUSY when the ring cannot hold the worst-case descriptor
 * count, NETDEV_TX_OK otherwise.
 */
3891 struct sk_buff *segs, *nskb;
3893 /* Estimate the number of fragments in the worst case */
3894 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3895 netif_stop_queue(tp->dev);
3896 return NETDEV_TX_BUSY;
/* Segment in software with TSO masked out of the feature set. */
3899 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3900 if (unlikely(IS_ERR(segs)))
3901 goto tg3_tso_bug_end;
/* Transmit each software-built segment through the normal xmit path. */
3907 tg3_start_xmit_dma_bug(nskb, tp->dev);
3913 return NETDEV_TX_OK;
3917 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3918 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
/* hard_start_xmit for chips needing the 4GB-boundary and/or 40-bit DMA
 * address workarounds (see comment above).  Maps the skb head and page
 * fragments into TX descriptors, applies checksum/TSO/VLAN flags, and
 * kicks the producer mailbox.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
3920 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3922 struct tg3 *tp = netdev_priv(dev);
3924 u32 len, entry, base_flags, mss;
3925 int would_hit_hwbug;
3927 len = skb_headlen(skb);
3929 /* We are running in BH disabled context with netif_tx_lock
3930 * and TX reclaim runs via tp->poll inside of a software
3931 * interrupt. Furthermore, IRQ processing runs lockless so we have
3932 * no IRQ context deadlocks to worry about either. Rejoice!
3934 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3935 if (!netif_queue_stopped(dev)) {
3936 netif_stop_queue(dev);
3938 /* This is a hard error, log it. */
3939 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3940 "queue awake!\n", dev->name);
3942 return NETDEV_TX_BUSY;
3945 entry = tp->tx_prod;
3947 if (skb->ip_summed == CHECKSUM_HW)
3948 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3949 #if TG3_TSO_SUPPORT != 0
/* TSO path: a non-zero gso_size on an oversized frame means hardware
 * (or firmware) segmentation is requested for this skb.
 */
3951 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3952 (mss = skb_shinfo(skb)->gso_size) != 0) {
3953 int tcp_opt_len, ip_tcp_len, hdr_len;
3955 if (skb_header_cloned(skb) &&
3956 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3961 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3962 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3964 hdr_len = ip_tcp_len + tcp_opt_len;
/* Headers longer than 80 bytes trip a HW TSO bug on affected chips;
 * punt to the software-GSO fallback.
 */
3965 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3966 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3967 return (tg3_tso_bug(tp, skb));
3969 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3970 TXD_FLAG_CPU_POST_DMA);
3972 skb->nh.iph->check = 0;
3973 skb->nh.iph->tot_len = htons(mss + hdr_len);
3974 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3975 skb->h.th->check = 0;
3976 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3980 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3985 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3986 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3987 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3990 tsflags = ((skb->nh.iph->ihl - 5) +
3991 (tcp_opt_len >> 2));
3992 mss |= (tsflags << 11);
3995 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3998 tsflags = ((skb->nh.iph->ihl - 5) +
3999 (tcp_opt_len >> 2));
4000 base_flags |= tsflags << 12;
4007 #if TG3_VLAN_TAG_USED
4008 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4009 base_flags |= (TXD_FLAG_VLAN |
4010 (vlan_tx_tag_get(skb) << 16));
4013 /* Queue skb data, a.k.a. the main skb fragment. */
4014 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4016 tp->tx_buffers[entry].skb = skb;
4017 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
/* Track whether any DMA mapping lands in a range this chip cannot
 * handle; if so, the descriptors are rewritten below.
 */
4019 would_hit_hwbug = 0;
4021 if (tg3_4g_overflow_test(mapping, len))
4022 would_hit_hwbug = 1;
4024 tg3_set_txd(tp, entry, mapping, len, base_flags,
4025 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4027 entry = NEXT_TX(entry);
4029 /* Now loop through additional data fragments, and queue them. */
4030 if (skb_shinfo(skb)->nr_frags > 0) {
4031 unsigned int i, last;
4033 last = skb_shinfo(skb)->nr_frags - 1;
4034 for (i = 0; i <= last; i++) {
4035 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4038 mapping = pci_map_page(tp->pdev,
4041 len, PCI_DMA_TODEVICE);
4043 tp->tx_buffers[entry].skb = NULL;
4044 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4046 if (tg3_4g_overflow_test(mapping, len))
4047 would_hit_hwbug = 1;
4049 if (tg3_40bit_overflow_test(tp, mapping, len))
4050 would_hit_hwbug = 1;
4052 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4053 tg3_set_txd(tp, entry, mapping, len,
4054 base_flags, (i == last)|(mss << 1));
4056 tg3_set_txd(tp, entry, mapping, len,
4057 base_flags, (i == last));
4059 entry = NEXT_TX(entry);
/* Rewrite the offending descriptors through the bounce-buffer
 * workaround when a problematic mapping was detected above.
 */
4063 if (would_hit_hwbug) {
4064 u32 last_plus_one = entry;
4067 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4068 start &= (TG3_TX_RING_SIZE - 1);
4070 /* If the workaround fails due to memory/mapping
4071 * failure, silently drop this packet.
4073 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4074 &start, base_flags, mss))
4080 /* Packets are ready, update Tx producer idx local and on card. */
4081 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4083 tp->tx_prod = entry;
/* Stop the queue when nearly full; re-wake right away if TX reclaim
 * has already freed enough descriptors in the meantime.
 */
4084 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4085 spin_lock(&tp->tx_lock);
4086 netif_stop_queue(dev);
4087 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4088 netif_wake_queue(tp->dev);
4089 spin_unlock(&tp->tx_lock);
4095 dev->trans_start = jiffies;
4097 return NETDEV_TX_OK;
/* Record a new MTU in the driver flags: an MTU above the standard
 * Ethernet payload turns the jumbo RX ring on, and on 5780-class parts
 * also disables TSO (the two are mutually exclusive there); a standard
 * MTU reverses both.
 */
4100 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4105 if (new_mtu > ETH_DATA_LEN) {
4106 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4107 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4108 ethtool_op_set_tso(dev, 0);
4111 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4113 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4114 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4115 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* net_device MTU-change entry point.  Rejects out-of-range values; if
 * the interface is down only the bookkeeping is updated, otherwise the
 * chip is halted, reconfigured for the new MTU, and restarted under the
 * full lock.
 */
4119 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4121 struct tg3 *tp = netdev_priv(dev);
4123 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4126 if (!netif_running(dev)) {
4127 /* We'll just catch it later when the
4130 tg3_set_mtu(dev, tp, new_mtu);
4136 tg3_full_lock(tp, 1);
4138 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4140 tg3_set_mtu(dev, tp, new_mtu);
4144 tg3_netif_start(tp);
4146 tg3_full_unlock(tp);
4151 /* Free up pending packets in all rx/tx rings.
4153 * The chip has been shut down and the driver detached from
4154 * the networking, so no interrupts or new tx packets will
4155 * end up in the driver. tp->{tx,}lock is not held and we are not
4156 * in an interrupt context and thus may sleep.
4158 static void tg3_free_rings(struct tg3 *tp)
4160 struct ring_info *rxp;
/* Unmap and free every posted standard-ring RX buffer. */
4163 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4164 rxp = &tp->rx_std_buffers[i];
4166 if (rxp->skb == NULL)
4168 pci_unmap_single(tp->pdev,
4169 pci_unmap_addr(rxp, mapping),
4170 tp->rx_pkt_buf_sz - tp->rx_offset,
4171 PCI_DMA_FROMDEVICE);
4172 dev_kfree_skb_any(rxp->skb);
/* Same for the jumbo RX ring (buffers may or may not be posted). */
4176 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4177 rxp = &tp->rx_jumbo_buffers[i];
4179 if (rxp->skb == NULL)
4181 pci_unmap_single(tp->pdev,
4182 pci_unmap_addr(rxp, mapping),
4183 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4184 PCI_DMA_FROMDEVICE);
4185 dev_kfree_skb_any(rxp->skb);
/* TX ring: each skb occupies one head slot plus one slot per page
 * fragment, so the index advances by the fragment count inside the
 * loop body rather than in the for-statement.
 */
4189 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4190 struct tx_ring_info *txp;
4191 struct sk_buff *skb;
4194 txp = &tp->tx_buffers[i];
4202 pci_unmap_single(tp->pdev,
4203 pci_unmap_addr(txp, mapping),
4210 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4211 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4212 pci_unmap_page(tp->pdev,
4213 pci_unmap_addr(txp, mapping),
4214 skb_shinfo(skb)->frags[j].size,
4219 dev_kfree_skb_any(skb);
4223 /* Initialize tx/rx rings for packet processing.
4225 * The chip has been shut down and the driver detached from
4226 * the networking, so no interrupts or new tx packets will
4227 * end up in the driver. tp->{tx,}lock are held and thus
4230 static void tg3_init_rings(struct tg3 *tp)
4234 /* Free up all the SKBs. */
4237 /* Zero out all descriptors. */
4238 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4239 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4240 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4241 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips in jumbo mode use the large buffer size for the
 * standard ring as well.
 */
4243 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4244 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4245 (tp->dev->mtu > ETH_DATA_LEN))
4246 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4248 /* Initialize invariants of the rings, we only set this
4249 * stuff once. This works because the card does not
4250 * write into the rx buffer posting rings.
4252 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4253 struct tg3_rx_buffer_desc *rxd;
4255 rxd = &tp->rx_std[i];
4256 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4258 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4259 rxd->opaque = (RXD_OPAQUE_RING_STD |
4260 (i << RXD_OPAQUE_INDEX_SHIFT));
4263 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4264 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4265 struct tg3_rx_buffer_desc *rxd;
4267 rxd = &tp->rx_jumbo[i];
4268 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4270 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4272 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4273 (i << RXD_OPAQUE_INDEX_SHIFT));
4277 /* Now allocate fresh SKBs for each rx ring. */
4278 for (i = 0; i < tp->rx_pending; i++) {
4279 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4284 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4285 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4286 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4294 * Must not be invoked with interrupt sources disabled and
4295 * the hardware shutdown down.
/* Release every ring-bookkeeping array and DMA-consistent region owned
 * by the device: the single kmalloc'd ring_info/tx_ring_info arena, the
 * RX std/jumbo/return rings, the TX ring, the status block and the
 * statistics block.  Each pointer is NULLed so a repeat call is safe.
 */
4297 static void tg3_free_consistent(struct tg3 *tp)
4299 kfree(tp->rx_std_buffers);
4300 tp->rx_std_buffers = NULL;
4302 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4303 tp->rx_std, tp->rx_std_mapping);
4307 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4308 tp->rx_jumbo, tp->rx_jumbo_mapping);
4309 tp->rx_jumbo = NULL;
4312 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4313 tp->rx_rcb, tp->rx_rcb_mapping);
4317 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4318 tp->tx_ring, tp->tx_desc_mapping);
4321 if (tp->hw_status) {
4322 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4323 tp->hw_status, tp->status_mapping);
4324 tp->hw_status = NULL;
4327 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4328 tp->hw_stats, tp->stats_mapping);
4329 tp->hw_stats = NULL;
4334 * Must not be invoked with interrupt sources disabled and
4335 * the hardware shutdown down. Can sleep.
4337 static int tg3_alloc_consistent(struct tg3 *tp)
/* One kmalloc provides the std-RX, jumbo-RX and TX bookkeeping arrays;
 * the jumbo and TX pointers are carved out of the same arena below.
 */
4339 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4341 TG3_RX_JUMBO_RING_SIZE)) +
4342 (sizeof(struct tx_ring_info) *
4345 if (!tp->rx_std_buffers)
4348 memset(tp->rx_std_buffers, 0,
4349 (sizeof(struct ring_info) *
4351 TG3_RX_JUMBO_RING_SIZE)) +
4352 (sizeof(struct tx_ring_info) *
4355 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4356 tp->tx_buffers = (struct tx_ring_info *)
4357 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
/* DMA-consistent descriptor rings, status block and stats block; any
 * failure unwinds through tg3_free_consistent() below.
 */
4359 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4360 &tp->rx_std_mapping);
4364 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4365 &tp->rx_jumbo_mapping);
4370 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4371 &tp->rx_rcb_mapping);
4375 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4376 &tp->tx_desc_mapping);
4380 tp->hw_status = pci_alloc_consistent(tp->pdev,
4382 &tp->status_mapping);
4386 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4387 sizeof(struct tg3_hw_stats),
4388 &tp->stats_mapping);
4392 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4393 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Error path: free whatever was successfully allocated. */
4398 tg3_free_consistent(tp);
4402 #define MAX_WAIT_CNT 1000
4404 /* To stop a block, clear the enable bit and poll till it
4405 * clears. tp->lock is held.
4407 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4412 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4419 /* We can't enable/disable these bits of the
4420 * 5705/5750, just say success.
/* Poll until the hardware confirms the enable bit has dropped. */
4433 for (i = 0; i < MAX_WAIT_CNT; i++) {
4436 if ((val & enable_bit) == 0)
/* Timed out: report unless the caller asked for silence. */
4440 if (i == MAX_WAIT_CNT && !silent) {
4441 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4442 "ofs=%lx enable_bit=%x\n",
4450 /* tp->lock is held. */
/* Orderly shutdown of the chip's DMA/receive/send engines: disable
 * interrupts and the RX path first, then stop each functional block,
 * quiesce the MAC TX path, reset the FTQ, and finally clear the status
 * and statistics blocks.  Accumulates and returns any stop-block error.
 */
4451 static int tg3_abort_hw(struct tg3 *tp, int silent)
4455 tg3_disable_ints(tp);
4457 tp->rx_mode &= ~RX_MODE_ENABLE;
4458 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop the receive-side blocks. */
4461 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4462 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4463 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4464 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4465 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4466 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
/* Stop the send-side and DMA blocks. */
4468 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4469 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4470 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4471 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4472 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4473 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4474 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4476 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4477 tw32_f(MAC_MODE, tp->mac_mode);
4480 tp->tx_mode &= ~TX_MODE_ENABLE;
4481 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Wait for the MAC transmitter to actually drain and stop. */
4483 for (i = 0; i < MAX_WAIT_CNT; i++) {
4485 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4488 if (i >= MAX_WAIT_CNT) {
4489 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4490 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4491 tp->dev->name, tr32(MAC_TX_MODE));
4495 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4496 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4497 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through-queue reset. */
4499 tw32(FTQ_RESET, 0xffffffff);
4500 tw32(FTQ_RESET, 0x00000000);
4502 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4503 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4506 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4508 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4513 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (recursive via
 * nvram_lock_cnt).  Requests grant 1 and polls for it; on timeout the
 * request is withdrawn.
 */
4514 static int tg3_nvram_lock(struct tg3 *tp)
4516 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4519 if (tp->nvram_lock_cnt == 0) {
4520 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4521 for (i = 0; i < 8000; i++) {
4522 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Grant never arrived: cancel the request. */
4527 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4531 tp->nvram_lock_cnt++;
4536 /* tp->lock is held. */
/* Drop one reference on the NVRAM arbitration lock; the hardware
 * semaphore is only released when the count reaches zero.
 */
4537 static void tg3_nvram_unlock(struct tg3 *tp)
4539 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4540 if (tp->nvram_lock_cnt > 0)
4541 tp->nvram_lock_cnt--;
4542 if (tp->nvram_lock_cnt == 0)
4543 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4547 /* tp->lock is held. */
/* Turn on host access to NVRAM on 5750+ parts whose NVRAM is not
 * write-protected by firmware.
 */
4548 static void tg3_enable_nvram_access(struct tg3 *tp)
4550 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4551 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4552 u32 nvaccess = tr32(NVRAM_ACCESS);
4554 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4558 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear the host NVRAM
 * access-enable bit on 5750+ parts without protected NVRAM.
 */
4559 static void tg3_disable_nvram_access(struct tg3 *tp)
4561 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4562 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4563 u32 nvaccess = tr32(NVRAM_ACCESS);
4565 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4569 /* tp->lock is held. */
/* Before a chip reset: post the firmware-mailbox magic, and with the
 * new-style ASF handshake also announce the driver-state transition
 * (init/shutdown/suspend) in NIC SRAM so firmware knows what follows.
 */
4570 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4572 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4573 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4575 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4577 case RESET_KIND_INIT:
4578 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4582 case RESET_KIND_SHUTDOWN:
4583 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4587 case RESET_KIND_SUSPEND:
4588 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4598 /* tp->lock is held. */
/* After a chip reset: with the new-style ASF handshake, report the
 * completed state (start/unload done) back to firmware via NIC SRAM.
 */
4599 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4601 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4603 case RESET_KIND_INIT:
4604 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4605 DRV_STATE_START_DONE);
4608 case RESET_KIND_SHUTDOWN:
4609 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4610 DRV_STATE_UNLOAD_DONE);
4619 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: write the driver state
 * for the given reset kind into the firmware state mailbox whenever
 * ASF is enabled.
 */
4620 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4622 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4624 case RESET_KIND_INIT:
4625 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4629 case RESET_KIND_SHUTDOWN:
4630 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4634 case RESET_KIND_SUSPEND:
4635 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4645 static void tg3_stop_fw(struct tg3 *);
4647 /* tp->lock is held. */
/* Perform a full core-clock chip reset and bring the device back to a
 * usable register state: save/restore the write method around the
 * reset, restore PCI config space (indirect access, retries, PCI-X
 * ordering, MSI on 5780-class), wait for on-chip firmware to come back,
 * and re-probe the ASF configuration from NVRAM-backed SRAM.
 */
4648 static int tg3_chip_reset(struct tg3 *tp)
4651 void (*write_op)(struct tg3 *, u32, u32);
4656 /* No matching tg3_nvram_unlock() after this because
4657 * chip reset below will undo the nvram lock.
4659 tp->nvram_lock_cnt = 0;
4661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4663 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4664 tw32(GRC_FASTBOOT_PC, 0);
4667 * We must avoid the readl() that normally takes place.
4668 * It locks machines, causes machine checks, and other
4669 * fun things. So, temporarily disable the 5701
4670 * hardware workaround, while we do the reset.
4672 write_op = tp->write32;
4673 if (write_op == tg3_write_flush_reg32)
4674 tp->write32 = tg3_write32;
4677 val = GRC_MISC_CFG_CORECLK_RESET;
4679 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4680 if (tr32(0x7e2c) == 0x60) {
4683 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4684 tw32(GRC_MISC_CFG, (1 << 29));
4689 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4690 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write triggers the core-clock reset. */
4691 tw32(GRC_MISC_CFG, val);
4693 /* restore 5701 hardware bug workaround write method */
4694 tp->write32 = write_op;
4696 /* Unfortunately, we have to delay before the PCI read back.
4697 * Some 575X chips even will not respond to a PCI cfg access
4698 * when the reset command is given to the chip.
4700 * How do these hardware designers expect things to work
4701 * properly if the PCI write is posted for a long period
4702 * of time? It is always necessary to have some method by
4703 * which a register read back can occur to push the write
4704 * out which does the reset.
4706 * For most tg3 variants the trick below was working.
4711 /* Flush PCI posted writes. The normal MMIO registers
4712 * are inaccessible at this time so this is the only
4713 * way to make this reliably (actually, this is no longer
4714 * the case, see above). I tried to use indirect
4715 * register read/write but this upset some 5701 variants.
4717 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4721 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4722 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4726 /* Wait for link training to complete. */
4727 for (i = 0; i < 5000; i++)
4730 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4731 pci_write_config_dword(tp->pdev, 0xc4,
4732 cfg_val | (1 << 15));
4734 /* Set PCIE max payload size and clear error status. */
4735 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4738 /* Re-enable indirect register accesses. */
4739 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4740 tp->misc_host_ctrl);
4742 /* Set MAX PCI retry to zero. */
4743 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4744 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4745 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4746 val |= PCISTATE_RETRY_SAME_DMA;
4747 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4749 pci_restore_state(tp->pdev);
4751 /* Make sure PCI-X relaxed ordering bit is clear. */
4752 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4753 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4754 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4756 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4759 /* Chip reset on 5780 will reset MSI enable bit,
4760 * so need to restore it.
4762 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4765 pci_read_config_word(tp->pdev,
4766 tp->msi_cap + PCI_MSI_FLAGS,
4768 pci_write_config_word(tp->pdev,
4769 tp->msi_cap + PCI_MSI_FLAGS,
4770 ctrl | PCI_MSI_FLAGS_ENABLE);
4771 val = tr32(MSGINT_MODE);
4772 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4775 val = tr32(MEMARB_MODE);
4776 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4779 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4781 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4783 tw32(0x5000, 0x400);
4786 tw32(GRC_MODE, tp->grc_mode);
4788 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4789 u32 val = tr32(0xc4);
4791 tw32(0xc4, val | (1 << 15));
4794 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4796 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4797 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4798 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4799 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Reprogram the MAC port mode to match the PHY attachment. */
4802 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4803 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4804 tw32_f(MAC_MODE, tp->mac_mode);
4805 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4806 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4807 tw32_f(MAC_MODE, tp->mac_mode);
4809 tw32_f(MAC_MODE, 0);
4812 /* Wait for firmware initialization to complete. */
4813 for (i = 0; i < 100000; i++) {
4814 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4815 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4820 /* Chip might not be fitted with firmware. Some Sun onboard
4821 * parts are configured like that. So don't signal the timeout
4822 * of the above loop as an error, but do report the lack of
4823 * running firmware once.
4826 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4827 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4829 printk(KERN_INFO PFX "%s: No firmware running.\n",
4833 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4834 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4835 u32 val = tr32(0x7c00);
4837 tw32(0x7c00, val | (1 << 25));
4840 /* Reprobe ASF enable state. */
4841 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4842 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4843 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4844 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4847 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4848 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4849 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4850 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4851 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4858 /* tp->lock is held. */
/* When ASF management firmware is running, ask it to pause via the
 * firmware command mailbox and a RX-CPU event, then poll briefly for
 * the acknowledgement.
 */
4859 static void tg3_stop_fw(struct tg3 *tp)
4861 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4865 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4866 val = tr32(GRC_RX_CPU_EVENT);
4868 tw32(GRC_RX_CPU_EVENT, val);
4870 /* Wait for RX cpu to ACK the event. */
4871 for (i = 0; i < 100; i++) {
4872 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4879 /* tp->lock is held. */
/* High-level halt: signal firmware before the reset, quiesce the
 * hardware, reset the chip, then post the legacy and post-reset
 * signatures for the given reset kind.
 */
4880 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4886 tg3_write_sig_pre_reset(tp, kind);
4888 tg3_abort_hw(tp, silent);
4889 err = tg3_chip_reset(tp);
4891 tg3_write_sig_legacy(tp, kind);
4892 tg3_write_sig_post_reset(tp, kind);
4900 #define TG3_FW_RELEASE_MAJOR 0x0
4901 #define TG3_FW_RELASE_MINOR 0x0
4902 #define TG3_FW_RELEASE_FIX 0x0
4903 #define TG3_FW_START_ADDR 0x08000000
4904 #define TG3_FW_TEXT_ADDR 0x08000000
4905 #define TG3_FW_TEXT_LEN 0x9c0
4906 #define TG3_FW_RODATA_ADDR 0x080009c0
4907 #define TG3_FW_RODATA_LEN 0x60
4908 #define TG3_FW_DATA_ADDR 0x08000a40
4909 #define TG3_FW_DATA_LEN 0x20
4910 #define TG3_FW_SBSS_ADDR 0x08000a60
4911 #define TG3_FW_SBSS_LEN 0xc
4912 #define TG3_FW_BSS_ADDR 0x08000a70
4913 #define TG3_FW_BSS_LEN 0x10
4915 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4916 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4917 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4918 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4919 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4920 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4921 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4922 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4923 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4924 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4925 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4926 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4927 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4928 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4929 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4930 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4931 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4932 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4933 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4934 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4935 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4936 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4937 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4938 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4939 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4940 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4942 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4943 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4944 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4945 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4946 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4947 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4948 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4949 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4950 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4951 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4952 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4953 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4954 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4955 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4956 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4957 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4958 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4959 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4960 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4961 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4962 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4963 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4964 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4965 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4966 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4967 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4968 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4969 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4970 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4971 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4972 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4973 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4974 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4975 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4976 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4977 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4978 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4979 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4980 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4981 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4982 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4983 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4984 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4985 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4986 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4987 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4988 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4989 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4990 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4991 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4992 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4993 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4994 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4995 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4996 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4997 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4998 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4999 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5000 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5001 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5002 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5003 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5004 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5005 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5006 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5009 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5010 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5011 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5012 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5013 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
/* Initialized-data section of the firmware image.  It is all zeros, so it
 * is compiled out; tg3_load_firmware_cpu() handles a NULL data pointer by
 * writing zeros to the target range instead (see info->data_data == NULL
 * in tg3_load_5701_a0_firmware_fix()).
 */
5018 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5019 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5020 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratchpad SRAM windows used to stage firmware for the embedded
 * RX and TX CPUs: 16 KB (0x4000 bytes) each, RX at 0x30000, TX at 0x34000.
 */
5024 #define RX_CPU_SCRATCH_BASE 0x30000
5025 #define RX_CPU_SCRATCH_SIZE 0x04000
5026 #define TX_CPU_SCRATCH_BASE 0x34000
5027 #define TX_CPU_SCRATCH_SIZE 0x04000
5029 /* tp->lock is held. */
/* Halt the embedded CPU whose register block starts at @offset
 * (RX_CPU_BASE or TX_CPU_BASE).  Repeatedly clears CPU_STATE and asserts
 * CPU_MODE_HALT until the CPU reports halted, then clears the firmware's
 * NVRAM arbitration request so the host can use the NVRAM interface.
 * NOTE(review): this excerpt is sampled — local declarations, loop exits,
 * the timeout return path and the final return are among the elided lines.
 */
5030 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* 5705-and-later chips have no separate TX CPU; halting it is a driver bug. */
5034 BUG_ON(offset == TX_CPU_BASE &&
5035 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5037 if (offset == RX_CPU_BASE) {
/* Poll up to 10000 times: re-arm state, request halt, check for HALT ack. */
5038 for (i = 0; i < 10000; i++) {
5039 tw32(offset + CPU_STATE, 0xffffffff);
5040 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5041 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final flushed halt request after the polling loop. */
5045 tw32(offset + CPU_STATE, 0xffffffff);
5046 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* TX CPU path: same halt-and-poll sequence. */
5049 for (i = 0; i < 10000; i++) {
5050 tw32(offset + CPU_STATE, 0xffffffff);
5051 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5052 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Timeout: report which CPU failed to halt. */
5058 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5061 (offset == RX_CPU_BASE ? "RX" : "TX"));
5065 /* Clear firmware's nvram arbitration. */
5066 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5067 tw32(NVRAM_SWARB, SWARB_REQ_CLR0)
/* Per-section load descriptors for a firmware image (struct fw_info;
 * the struct header and the u32 *text_data/rodata_data/data_data pointer
 * members are not visible in this excerpt).  Each *_base is the firmware's
 * link-time address for the section; only its low 16 bits are used as an
 * offset into the CPU scratchpad by tg3_load_firmware_cpu().  Each *_len
 * is the section size in bytes.
 */
5072 unsigned int text_base;   /* .text load address */
5073 unsigned int text_len;    /* .text length in bytes */
5075 unsigned int rodata_base; /* .rodata load address */
5076 unsigned int rodata_len;  /* .rodata length in bytes */
5078 unsigned int data_base;   /* .data load address */
5079 unsigned int data_len;    /* .data length in bytes */
5083 /* tp->lock is held. */
/* Copy a firmware image described by @info into the scratchpad SRAM of the
 * CPU at @cpu_base.  Sequence: halt the CPU (taking the NVRAM lock first,
 * since bootcode may still be running), zero the whole scratch area, then
 * write the text/rodata/data sections word-by-word.  A NULL section pointer
 * means "fill with zeros".  NOTE(review): this excerpt is sampled — the
 * function's braces, error-return paths and final return are elided.
 */
5084 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5085 int cpu_scratch_size, struct fw_info *info)
5087 int err, lock_err, i;
/* Indirect write routine chosen per chip family below. */
5088 void (*write_op)(struct tg3 *, u32, u32);
/* 5705+ has no TX CPU: refuse to load TX firmware on it. */
5090 if (cpu_base == TX_CPU_BASE &&
5091 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5092 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5093 "TX cpu firmware on %s which is 5705.\n",
5098 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5099 write_op = tg3_write_mem;
5101 write_op = tg3_write_indirect_reg32;
5103 /* It is possible that bootcode is still loading at this point.
5104 * Get the nvram lock first before halting the cpu.
5106 lock_err = tg3_nvram_lock(tp);
5107 err = tg3_halt_cpu(tp, cpu_base);
5109 tg3_nvram_unlock(tp);
/* Clear the entire scratch area before loading. */
5113 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5114 write_op(tp, cpu_scratch_base + i, 0);
5115 tw32(cpu_base + CPU_STATE, 0xffffffff);
/* Keep the CPU halted while its memory is being written. */
5116 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* .text section: low 16 bits of the link address index the scratchpad. */
5117 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5118 write_op(tp, (cpu_scratch_base +
5119 (info->text_base & 0xffff) +
5122 info->text_data[i] : 0));
/* .rodata section (NULL pointer -> zeros). */
5123 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5124 write_op(tp, (cpu_scratch_base +
5125 (info->rodata_base & 0xffff) +
5127 (info->rodata_data ?
5128 info->rodata_data[i] : 0));
/* .data section (NULL pointer -> zeros). */
5129 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5130 write_op(tp, (cpu_scratch_base +
5131 (info->data_base & 0xffff) +
5134 info->data_data[i] : 0));
5142 /* tp->lock is held. */
/* Work around a 5701 A0 chip bug by loading replacement firmware into both
 * the RX and TX CPU scratchpads, then starting only the RX CPU: set its PC
 * to the firmware entry point and verify (with up to 5 retries) that the PC
 * actually took.  The .data section is all zeros (data_data = NULL).
 * NOTE(review): this excerpt is sampled — error checks after the two load
 * calls, the retry-loop exit, and the returns are among the elided lines.
 */
5143 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5145 struct fw_info info;
5148 info.text_base = TG3_FW_TEXT_ADDR;
5149 info.text_len = TG3_FW_TEXT_LEN;
5150 info.text_data = &tg3FwText[0];
5151 info.rodata_base = TG3_FW_RODATA_ADDR;
5152 info.rodata_len = TG3_FW_RODATA_LEN;
5153 info.rodata_data = &tg3FwRodata[0];
5154 info.data_base = TG3_FW_DATA_ADDR;
5155 info.data_len = TG3_FW_DATA_LEN;
/* NULL -> loader fills the .data range with zeros. */
5156 info.data_data = NULL;
5158 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5159 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5164 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5165 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5170 /* Now startup only the RX cpu. */
5171 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5172 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* Verify the PC write took; re-halt and rewrite it on each retry. */
5174 for (i = 0; i < 5; i++) {
5175 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5177 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5178 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5179 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5183 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5184 "to set RX CPU PC, is %08x should be %08x\n",
5185 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Release the CPU from halt: clear state, clear CPU_MODE (flushed). */
5189 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5190 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000)
5195 #if TG3_TSO_SUPPORT != 0
/* Layout of the TSO firmware image (version 1.6.0): per-section link
 * addresses and byte lengths used to build the fw_info descriptors.
 * NOTE(review): "RELASE" below is a long-standing typo for "RELEASE";
 * renaming the macro would break its references elsewhere in the file,
 * so it is left as-is.
 */
5197 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5198 #define TG3_TSO_FW_RELASE_MINOR 0x6
5199 #define TG3_TSO_FW_RELEASE_FIX 0x0
5200 #define TG3_TSO_FW_START_ADDR 0x08000000
5201 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5202 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5203 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5204 #define TG3_TSO_FW_RODATA_LEN 0x60
5205 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5206 #define TG3_TSO_FW_DATA_LEN 0x30
5207 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5208 #define TG3_TSO_FW_SBSS_LEN 0x2c
5209 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5210 #define TG3_TSO_FW_BSS_LEN 0x894
5212 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5213 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5214 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5215 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5216 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5217 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5218 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5219 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5220 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5221 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5222 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5223 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5224 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5225 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5226 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5227 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5228 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5229 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5230 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5231 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5232 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5233 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5234 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5235 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5236 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5237 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5238 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5239 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5240 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5241 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5242 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5243 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5244 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5245 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5246 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5247 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5248 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5249 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5250 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5251 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5252 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5253 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5254 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5255 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5256 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5257 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5258 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5259 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5260 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5261 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5262 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5263 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5264 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5265 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5266 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5267 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5268 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5269 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5270 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5271 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5272 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5273 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5274 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5275 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5276 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5277 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5278 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5279 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5280 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5281 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5282 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5283 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5284 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5285 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5286 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5287 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5288 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5289 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5290 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5291 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5292 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5293 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5294 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5295 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5296 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5297 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5298 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5299 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5300 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5301 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5302 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5303 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5304 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5305 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5306 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5307 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5308 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5309 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5310 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5311 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5312 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5313 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5314 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5315 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5316 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5317 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5318 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5319 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5320 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5321 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5322 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5323 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5324 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5325 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5326 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5327 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5328 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5329 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5330 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5331 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5332 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5333 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5334 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5335 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5336 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5337 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5338 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5339 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5340 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5341 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5342 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5343 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5344 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5345 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5346 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5347 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5348 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5349 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5350 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5351 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5352 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5353 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5354 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5355 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5356 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5357 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5358 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5359 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5360 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5361 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5362 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5363 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5364 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5365 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5366 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5367 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5368 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5369 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5370 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5371 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5372 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5373 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5374 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5375 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5376 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5377 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5378 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5379 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5380 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5381 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5382 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5383 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5384 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5385 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5386 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5387 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5388 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5389 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5390 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5391 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5392 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5393 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5394 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5395 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5396 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5397 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5398 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5399 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5400 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5401 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5402 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5403 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5404 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5405 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5406 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5407 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5408 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5409 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5410 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5411 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5412 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5413 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5414 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5415 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5416 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5417 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5418 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5419 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5420 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5421 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5422 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5423 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5424 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5425 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5426 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5427 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5428 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5429 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5430 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5431 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5432 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5433 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5434 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5435 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5436 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5437 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5438 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5439 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5440 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5441 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5442 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5443 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5444 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5445 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5446 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5447 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5448 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5449 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5450 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5451 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5452 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5453 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5454 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5455 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5456 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5457 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5458 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5459 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5460 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5461 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5462 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5463 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5464 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5465 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5466 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5467 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5468 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5469 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5470 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5471 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5472 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5473 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5474 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5475 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5476 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5477 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5478 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5479 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5480 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5481 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5482 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5483 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5484 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5485 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5486 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5487 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5488 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5489 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5490 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5491 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5492 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5493 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5494 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5495 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5496 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
/* Read-only data for the TSO firmware: packed ASCII name strings
 * ("MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**", "SwEvent0", "fatalErr").
 * NOTE(review): the closing "};" is not shown in this excerpt; the array
 * is machine-generated and must not be hand-edited.
 */
5499 static u32 tg3TsoFwRodata[] = {
5500 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5501 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5502 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5503 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
/* Initialized data for the TSO firmware; the packed ASCII words spell the
 * version tag "stkoffld_v1.6.0".  NOTE(review): the closing "};" is not
 * shown in this excerpt; machine-generated, do not hand-edit.
 */
5507 static u32 tg3TsoFwData[] = {
5508 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5509 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5513 /* 5705 needs a special version of the TSO firmware. */
/* Layout of the 5705-specific TSO firmware image (version 1.2.0):
 * per-section link addresses and byte lengths.
 * NOTE(review): "RELASE" below repeats the same historical typo as the
 * TG3_TSO_FW_RELASE_MINOR macro; left unchanged to avoid breaking
 * references elsewhere in the file.
 */
5514 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5515 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5516 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5517 #define TG3_TSO5_FW_START_ADDR 0x00010000
5518 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5519 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5520 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5521 #define TG3_TSO5_FW_RODATA_LEN 0x50
5522 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5523 #define TG3_TSO5_FW_DATA_LEN 0x20
5524 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5525 #define TG3_TSO5_FW_SBSS_LEN 0x28
5526 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5527 #define TG3_TSO5_FW_BSS_LEN 0x88
5529 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5530 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5531 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5532 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5533 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5534 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5535 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5536 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5537 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5538 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5539 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5540 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5541 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5542 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5543 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5544 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5545 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5546 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5547 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5548 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5549 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5550 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5551 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5552 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5553 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5554 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5555 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5556 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5557 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5558 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5559 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5560 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5561 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5562 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5563 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5564 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5565 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5566 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5567 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5568 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5569 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5570 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5571 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5572 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5573 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5574 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5575 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5576 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5577 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5578 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5579 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5580 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5581 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5582 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5583 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5584 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5585 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5586 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5587 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5588 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5589 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5590 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5591 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5592 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5593 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5594 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5595 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5596 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5597 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5598 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5599 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5600 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5601 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5602 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5603 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5604 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5605 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5606 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5607 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5608 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5609 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5610 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5611 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5612 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5613 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5614 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5615 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5616 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5617 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5618 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5619 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5620 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5621 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5622 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5623 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5624 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5625 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5626 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5627 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5628 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5629 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5630 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5631 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5632 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5633 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5634 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5635 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5636 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5637 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5638 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5639 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5640 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5641 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5642 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5643 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5644 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5645 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5646 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5647 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5648 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5649 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5650 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5651 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5652 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5653 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5654 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5655 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5656 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5657 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5658 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5659 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5660 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5661 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5662 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5663 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5664 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5665 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5666 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5667 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5668 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5669 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5670 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5671 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5672 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5673 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5674 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5675 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5676 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5677 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5678 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5679 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5680 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5681 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5682 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5683 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5684 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5685 0x00000000, 0x00000000, 0x00000000,
5688 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5689 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5690 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5691 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5692 0x00000000, 0x00000000, 0x00000000,
5695 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5696 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5697 0x00000000, 0x00000000, 0x00000000,
5700 /* tp->lock is held. */
5701 static int tg3_load_tso_firmware(struct tg3 *tp)
5703 struct fw_info info;
5704 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5707 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5711 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5712 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5713 info.text_data = &tg3Tso5FwText[0];
5714 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5715 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5716 info.rodata_data = &tg3Tso5FwRodata[0];
5717 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5718 info.data_len = TG3_TSO5_FW_DATA_LEN;
5719 info.data_data = &tg3Tso5FwData[0];
5720 cpu_base = RX_CPU_BASE;
5721 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5722 cpu_scratch_size = (info.text_len +
5725 TG3_TSO5_FW_SBSS_LEN +
5726 TG3_TSO5_FW_BSS_LEN);
5728 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5729 info.text_len = TG3_TSO_FW_TEXT_LEN;
5730 info.text_data = &tg3TsoFwText[0];
5731 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5732 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5733 info.rodata_data = &tg3TsoFwRodata[0];
5734 info.data_base = TG3_TSO_FW_DATA_ADDR;
5735 info.data_len = TG3_TSO_FW_DATA_LEN;
5736 info.data_data = &tg3TsoFwData[0];
5737 cpu_base = TX_CPU_BASE;
5738 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5739 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5742 err = tg3_load_firmware_cpu(tp, cpu_base,
5743 cpu_scratch_base, cpu_scratch_size,
5748 /* Now startup the cpu. */
5749 tw32(cpu_base + CPU_STATE, 0xffffffff);
5750 tw32_f(cpu_base + CPU_PC, info.text_base);
5752 for (i = 0; i < 5; i++) {
5753 if (tr32(cpu_base + CPU_PC) == info.text_base)
5755 tw32(cpu_base + CPU_STATE, 0xffffffff);
5756 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5757 tw32_f(cpu_base + CPU_PC, info.text_base);
5761 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5762 "to set CPU PC, is %08x should be %08x\n",
5763 tp->dev->name, tr32(cpu_base + CPU_PC),
5767 tw32(cpu_base + CPU_STATE, 0xffffffff);
5768 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5772 #endif /* TG3_TSO_SUPPORT != 0 */
5774 /* tp->lock is held. */
5775 static void __tg3_set_mac_addr(struct tg3 *tp)
5777 u32 addr_high, addr_low;
5780 addr_high = ((tp->dev->dev_addr[0] << 8) |
5781 tp->dev->dev_addr[1]);
5782 addr_low = ((tp->dev->dev_addr[2] << 24) |
5783 (tp->dev->dev_addr[3] << 16) |
5784 (tp->dev->dev_addr[4] << 8) |
5785 (tp->dev->dev_addr[5] << 0));
5786 for (i = 0; i < 4; i++) {
5787 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5788 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5793 for (i = 0; i < 12; i++) {
5794 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5795 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5799 addr_high = (tp->dev->dev_addr[0] +
5800 tp->dev->dev_addr[1] +
5801 tp->dev->dev_addr[2] +
5802 tp->dev->dev_addr[3] +
5803 tp->dev->dev_addr[4] +
5804 tp->dev->dev_addr[5]) &
5805 TX_BACKOFF_SEED_MASK;
5806 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5809 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5811 struct tg3 *tp = netdev_priv(dev);
5812 struct sockaddr *addr = p;
5814 if (!is_valid_ether_addr(addr->sa_data))
5817 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5819 if (!netif_running(dev))
5822 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5823 /* Reset chip so that ASF can re-init any MAC addresses it
5827 tg3_full_lock(tp, 1);
5829 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5832 tg3_netif_start(tp);
5833 tg3_full_unlock(tp);
5835 spin_lock_bh(&tp->lock);
5836 __tg3_set_mac_addr(tp);
5837 spin_unlock_bh(&tp->lock);
5843 /* tp->lock is held. */
5844 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5845 dma_addr_t mapping, u32 maxlen_flags,
5849 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5850 ((u64) mapping >> 32));
5852 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5853 ((u64) mapping & 0xffffffff));
5855 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5858 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5860 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5864 static void __tg3_set_rx_mode(struct net_device *);
5865 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5867 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5868 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5869 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5870 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5871 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5872 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5873 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5875 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5876 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5877 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5878 u32 val = ec->stats_block_coalesce_usecs;
5880 if (!netif_carrier_ok(tp->dev))
5883 tw32(HOSTCC_STAT_COAL_TICKS, val);
5887 /* tp->lock is held. */
/* NOTE(review): full chip reset + reinitialization path.  This extract
 * is missing many structural lines (braces, udelay calls, some
 * statements) dropped during extraction; the surviving lines below are
 * kept byte-identical.  Do not treat this block as compilable as-is.
 */
5888 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5890 u32 val, rdmac_mode;
/* Quiesce the chip: mask interrupts, signal the reset to management
 * firmware, and abort any in-flight DMA before the core reset.
 */
5893 tg3_disable_ints(tp);
5897 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5899 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5900 tg3_abort_hw(tp, 1);
5903 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5906 err = tg3_chip_reset(tp);
5910 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5912 /* This works around an issue with Athlon chipsets on
5913 * B3 tigon3 silicon. This bit has no effect on any
5914 * other revision. But do not set this on PCI Express
5917 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5918 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5919 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5921 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5922 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5923 val = tr32(TG3PCI_PCISTATE);
5924 val |= PCISTATE_RETRY_SAME_DMA;
5925 tw32(TG3PCI_PCISTATE, val);
5928 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5929 /* Enable some hw fixes. */
5930 val = tr32(TG3PCI_MSI_DATA);
5931 val |= (1 << 26) | (1 << 28) | (1 << 29);
5932 tw32(TG3PCI_MSI_DATA, val);
5935 /* Descriptor ring init may make accesses to the
5936 * NIC SRAM area to setup the TX descriptors, so we
5937 * can only do this after the hardware has been
5938 * successfully reset.
5942 /* This value is determined during the probe time DMA
5943 * engine test, tg3_test_dma.
5945 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5947 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5948 GRC_MODE_4X_NIC_SEND_RINGS |
5949 GRC_MODE_NO_TX_PHDR_CSUM |
5950 GRC_MODE_NO_RX_PHDR_CSUM);
5951 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5953 /* Pseudo-header checksum is done by hardware logic and not
5954 * the offload processers, so make the chip do the pseudo-
5955 * header checksums on receive. For transmit it is more
5956 * convenient to do the pseudo-header checksum in software
5957 * as Linux does that on transmit for us in all cases.
5959 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5963 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5965 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5966 val = tr32(GRC_MISC_CFG);
5968 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5969 tw32(GRC_MISC_CFG, val);
5971 /* Initialize MBUF/DESC pool. */
5972 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5974 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5975 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5977 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5979 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5980 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5981 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5983 #if TG3_TSO_SUPPORT != 0
5984 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
/* 5705 with TSO: the firmware lives at the bottom of the MBUF pool,
 * so shrink the pool by the (128-byte aligned) firmware footprint.
 */
5987 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5988 TG3_TSO5_FW_RODATA_LEN +
5989 TG3_TSO5_FW_DATA_LEN +
5990 TG3_TSO5_FW_SBSS_LEN +
5991 TG3_TSO5_FW_BSS_LEN);
5992 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5993 tw32(BUFMGR_MB_POOL_ADDR,
5994 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5995 tw32(BUFMGR_MB_POOL_SIZE,
5996 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Buffer-manager watermarks depend on standard vs jumbo MTU. */
6000 if (tp->dev->mtu <= ETH_DATA_LEN) {
6001 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6002 tp->bufmgr_config.mbuf_read_dma_low_water);
6003 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6004 tp->bufmgr_config.mbuf_mac_rx_low_water);
6005 tw32(BUFMGR_MB_HIGH_WATER,
6006 tp->bufmgr_config.mbuf_high_water);
6008 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6009 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6010 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6011 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6012 tw32(BUFMGR_MB_HIGH_WATER,
6013 tp->bufmgr_config.mbuf_high_water_jumbo);
6015 tw32(BUFMGR_DMA_LOW_WATER,
6016 tp->bufmgr_config.dma_low_water);
6017 tw32(BUFMGR_DMA_HIGH_WATER,
6018 tp->bufmgr_config.dma_high_water);
/* Enable the buffer manager and poll until the enable bit sticks. */
6020 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6021 for (i = 0; i < 2000; i++) {
6022 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6027 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6032 /* Setup replenish threshold. */
6033 val = tp->rx_pending / 8;
6036 else if (val > tp->rx_std_max_post)
6037 val = tp->rx_std_max_post;
6039 tw32(RCVBDI_STD_THRESH, val);
6041 /* Initialize TG3_BDINFO's at:
6042 * RCVDBDI_STD_BD: standard eth size rx ring
6043 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6044 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6047 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6048 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6049 * ring attribute flags
6050 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6052 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6053 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6055 * The size of each ring is fixed in the firmware, but the location is
6058 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6059 ((u64) tp->rx_std_mapping >> 32));
6060 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6061 ((u64) tp->rx_std_mapping & 0xffffffff));
6062 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6063 NIC_SRAM_RX_BUFFER_DESC);
6065 /* Don't even try to program the JUMBO/MINI buffer descriptor
6068 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6069 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6070 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6072 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6073 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6075 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6076 BDINFO_FLAGS_DISABLED);
6078 /* Setup replenish threshold. */
6079 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6081 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6082 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6083 ((u64) tp->rx_jumbo_mapping >> 32));
6084 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6085 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6086 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6087 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6088 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6089 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6091 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6092 BDINFO_FLAGS_DISABLED);
6097 /* There is only one send ring on 5705/5750, no need to explicitly
6098 * disable the others.
6100 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6101 /* Clear out send RCB ring in SRAM. */
6102 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6103 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6104 BDINFO_FLAGS_DISABLED);
6109 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6110 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6112 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6113 tp->tx_desc_mapping,
6114 (TG3_TX_RING_SIZE <<
6115 BDINFO_FLAGS_MAXLEN_SHIFT),
6116 NIC_SRAM_TX_BUFFER_DESC);
6118 /* There is only one receive return ring on 5705/5750, no need
6119 * to explicitly disable the others.
6121 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6122 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6123 i += TG3_BDINFO_SIZE) {
6124 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6125 BDINFO_FLAGS_DISABLED);
6130 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6132 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6134 (TG3_RX_RCB_RING_SIZE(tp) <<
6135 BDINFO_FLAGS_MAXLEN_SHIFT),
6138 tp->rx_std_ptr = tp->rx_pending;
6139 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6142 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6143 tp->rx_jumbo_pending : 0;
6144 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6147 /* Initialize MAC address and backoff seed. */
6148 __tg3_set_mac_addr(tp);
6150 /* MTU + ethernet header + FCS + optional VLAN tag */
6151 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6153 /* The slot time is changed by tg3_setup_phy if we
6154 * run at gigabit with half duplex.
6156 tw32(MAC_TX_LENGTHS,
6157 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6158 (6 << TX_LENGTHS_IPG_SHIFT) |
6159 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6161 /* Receive rules. */
6162 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6163 tw32(RCVLPC_CONFIG, 0x0181);
6165 /* Calculate RDMAC_MODE setting early, we need it to determine
6166 * the RCVLPC_STATE_ENABLE mask.
6168 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6169 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6170 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6171 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6172 RDMAC_MODE_LNGREAD_ENAB);
6173 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6174 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6176 /* If statement applies to 5705 and 5750 PCI devices only */
6177 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6178 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6179 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6180 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6181 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6182 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6183 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6184 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6185 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6186 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6190 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6191 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6193 #if TG3_TSO_SUPPORT != 0
6194 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6195 rdmac_mode |= (1 << 27);
6198 /* Receive/send statistics. */
6199 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6200 val = tr32(RCVLPC_STATS_ENABLE);
6201 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6202 tw32(RCVLPC_STATS_ENABLE, val);
6203 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6204 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6205 val = tr32(RCVLPC_STATS_ENABLE);
6206 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6207 tw32(RCVLPC_STATS_ENABLE, val);
6209 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6211 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6212 tw32(SNDDATAI_STATSENAB, 0xffffff);
6213 tw32(SNDDATAI_STATSCTRL,
6214 (SNDDATAI_SCTRL_ENABLE |
6215 SNDDATAI_SCTRL_FASTUPD));
6217 /* Setup host coalescing engine. */
6218 tw32(HOSTCC_MODE, 0);
6219 for (i = 0; i < 2000; i++) {
6220 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6225 __tg3_set_coalesce(tp, &tp->coal);
6227 /* set status block DMA address */
6228 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6229 ((u64) tp->status_mapping >> 32));
6230 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6231 ((u64) tp->status_mapping & 0xffffffff));
6233 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6234 /* Status/statistics block address. See tg3_timer,
6235 * the tg3_periodic_fetch_stats call there, and
6236 * tg3_get_stats to see how this works for 5705/5750 chips.
6238 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6239 ((u64) tp->stats_mapping >> 32));
6240 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6241 ((u64) tp->stats_mapping & 0xffffffff));
6242 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6243 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6246 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6248 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6249 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6250 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6251 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6253 /* Clear statistics/status block in chip, and status block in ram. */
6254 for (i = NIC_SRAM_STATS_BLK;
6255 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6257 tg3_write_mem(tp, i, 0);
6260 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6262 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6263 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6264 /* reset to prevent losing 1st rx packet intermittently */
6265 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6269 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6270 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6271 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6274 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6275 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6276 * register to preserve the GPIO settings for LOMs. The GPIOs,
6277 * whether used as inputs or outputs, are set by boot code after
6280 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6283 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6284 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6287 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6288 GRC_LCLCTRL_GPIO_OUTPUT3;
6290 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6291 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6293 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6295 /* GPIO1 must be driven high for eeprom write protect */
6296 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6297 GRC_LCLCTRL_GPIO_OUTPUT1);
6299 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6302 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6305 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6306 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6310 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6311 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6312 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6313 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6314 WDMAC_MODE_LNGREAD_ENAB);
6316 /* If statement applies to 5705 and 5750 PCI devices only */
6317 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6318 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
/* NOTE(review): this condition tests tg3_flags against a tg3_flags2
 * bit (TG3_FLG2_TSO_CAPABLE) — known upstream oddity; confirm against
 * driver history before changing.
 */
6320 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6321 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6322 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6324 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6325 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6326 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6327 val |= WDMAC_MODE_RX_ACCEL;
6331 /* Enable host coalescing bug fix */
6332 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6333 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6336 tw32_f(WDMAC_MODE, val);
/* PCI-X burst/split-transaction tuning for 5703/5704 only. */
6339 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6340 val = tr32(TG3PCI_X_CAPS);
6341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6342 val &= ~PCIX_CAPS_BURST_MASK;
6343 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6344 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6345 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6346 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6347 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6348 val |= (tp->split_mode_max_reqs <<
6349 PCIX_CAPS_SPLIT_SHIFT);
6351 tw32(TG3PCI_X_CAPS, val);
6354 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining DMA/descriptor engines. */
6357 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6358 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6359 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6360 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6361 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6362 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6363 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6364 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6365 #if TG3_TSO_SUPPORT != 0
6366 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6367 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6369 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6370 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6372 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6373 err = tg3_load_5701_a0_firmware_fix(tp);
6378 #if TG3_TSO_SUPPORT != 0
6379 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6380 err = tg3_load_tso_firmware(tp);
6386 tp->tx_mode = TX_MODE_ENABLE;
6387 tw32_f(MAC_TX_MODE, tp->tx_mode);
6390 tp->rx_mode = RX_MODE_ENABLE;
6391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6392 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6394 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Restore link parameters saved when the PHY was powered down. */
6397 if (tp->link_config.phy_is_low_power) {
6398 tp->link_config.phy_is_low_power = 0;
6399 tp->link_config.speed = tp->link_config.orig_speed;
6400 tp->link_config.duplex = tp->link_config.orig_duplex;
6401 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6404 tp->mi_mode = MAC_MI_MODE_BASE;
6405 tw32_f(MAC_MI_MODE, tp->mi_mode);
6408 tw32(MAC_LED_CTRL, tp->led_ctrl);
6410 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6411 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6412 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6415 tw32_f(MAC_RX_MODE, tp->rx_mode);
6418 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6419 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6420 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6421 /* Set drive transmission level to 1.2V */
6422 /* only if the signal pre-emphasis bit is not set */
6423 val = tr32(MAC_SERDES_CFG);
6426 tw32(MAC_SERDES_CFG, val);
6428 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6429 tw32(MAC_SERDES_CFG, 0x616000);
6432 /* Prevent chip from dropping frames when flow control
6435 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6438 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6439 /* Use hardware link auto-negotiation */
6440 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6443 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6447 tmp = tr32(SERDES_RX_CTRL);
6448 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6449 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6450 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6451 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Bring up the PHY, honoring the caller's reset_phy request. */
6454 err = tg3_setup_phy(tp, reset_phy);
6458 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6461 /* Clear CRC stats. */
6462 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6463 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6464 tg3_readphy(tp, 0x14, &tmp);
6468 __tg3_set_rx_mode(tp->dev);
6470 /* Initialize receive rules. */
6471 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6472 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6473 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6474 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6476 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6477 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6481 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
/* Zero out the unused receive rule slots (count varies by chip;
 * the switch dispatching over the limit was lost in extraction).
 */
6485 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6487 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6489 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6491 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6493 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6495 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6497 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6499 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6501 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6503 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6505 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6507 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6509 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6511 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6519 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6524 /* Called at device open time to get the chip ready for
6525 * packet processing. Invoked with tp->lock held.
6527 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
/* Bring the device to full power before touching any other registers. */
6531 /* Force the chip into D0. */
6532 err = tg3_set_power_state(tp, PCI_D0);
6536 tg3_switch_clocks(tp);
/* Reset the indirect-access memory window to a known base offset. */
6538 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Full hardware (re)initialization; reset_phy is forwarded so callers
 * can request a PHY reset as part of bring-up.
 */
6540 err = tg3_reset_hw(tp, reset_phy);
/* Fold the chip's 32-bit counter at REG into the 64-bit software
 * statistic PSTAT: add into .low and carry into .high when .low wraps
 * (unsigned wrap detected by the post-add comparison).
 */
6546 #define TG3_STAT_ADD32(PSTAT, REG) \
6547 do { u32 __val = tr32(REG); \
6548 (PSTAT)->low += __val; \
6549 if ((PSTAT)->low < __val) \
6550 (PSTAT)->high += 1; \
/* Periodically accumulate the chip's 32-bit MAC TX/RX and receive-list
 * placement counters into the 64-bit copies in tp->hw_stats.  Does the
 * work only while the carrier is up.  Called from tg3_timer() with
 * tp->lock held.
 */
6553 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6555 struct tg3_hw_stats *sp = tp->hw_stats;
/* Nothing to fetch while the link is down. */
6557 if (!netif_carrier_ok(tp->dev))
6560 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6561 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6562 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6563 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6564 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6565 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6566 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6567 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6568 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6569 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6570 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6571 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6572 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6574 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6575 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6576 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6577 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6578 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6579 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6580 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6581 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6582 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6583 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6584 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6585 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6586 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6587 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Receive-list placement counters (BD starvation / discards / errors). */
6589 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6590 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6591 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Driver heartbeat timer.  Runs every tp->timer_offset jiffies and:
 *  - works around the racy non-tagged IRQ status protocol,
 *  - detects a hung write-DMA engine and schedules a chip reset,
 *  - once per second polls link state and fetches hw statistics,
 *  - every two seconds sends an ASF "driver alive" heartbeat.
 * Re-arms itself at the end.
 */
6594 static void tg3_timer(unsigned long __opaque)
6596 struct tg3 *tp = (struct tg3 *) __opaque;
6601 spin_lock(&tp->lock);
6603 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6604 /* All of this garbage is because when using non-tagged
6605 * IRQ status the mailbox/status_block protocol the chip
6606 * uses with the cpu is race prone.
/* Force the interrupt to be redelivered so a status update raced
 * against the ISR is not lost.
 */
6608 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6609 tw32(GRC_LOCAL_CTRL,
6610 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6612 tw32(HOSTCC_MODE, tp->coalesce_mode |
6613 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write DMA engine unexpectedly disabled: the chip has likely hung.
 * Defer a full reset to process context via the reset workqueue.
 */
6616 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6617 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6618 spin_unlock(&tp->lock);
6619 schedule_work(&tp->reset_task);
6624 /* This part only runs once per second. */
6625 if (!--tp->timer_counter) {
6626 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6627 tg3_periodic_fetch_stats(tp);
6629 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6633 mac_stat = tr32(MAC_STATUS);
6636 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6637 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6639 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6643 tg3_setup_phy(tp, 0);
6644 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6645 u32 mac_stat = tr32(MAC_STATUS);
/* SERDES link polling: react to a loss of link while carrier is up,
 * or to PCS sync / signal detect while carrier is down.
 */
6648 if (netif_carrier_ok(tp->dev) &&
6649 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6652 if (! netif_carrier_ok(tp->dev) &&
6653 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6654 MAC_STATUS_SIGNAL_DET))) {
6660 ~MAC_MODE_PORT_MODE_MASK));
6662 tw32_f(MAC_MODE, tp->mac_mode);
6664 tg3_setup_phy(tp, 0);
6666 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6667 tg3_serdes_parallel_detect(tp);
6669 tp->timer_counter = tp->timer_multiplier;
6672 /* Heartbeat is only sent once every 2 seconds. */
6673 if (!--tp->asf_counter) {
6674 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
/* Tell the ASF firmware the driver is alive; the firmware treats
 * a missed heartbeat as a dead driver after the timeout below.
 */
6677 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6678 FWCMD_NICDRV_ALIVE2);
6679 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6680 /* 5 seconds timeout */
6681 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6682 val = tr32(GRC_RX_CPU_EVENT);
6684 tw32(GRC_RX_CPU_EVENT, val);
6686 tp->asf_counter = tp->asf_multiplier;
6689 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
6692 tp->timer.expires = jiffies + tp->timer_offset;
6693 add_timer(&tp->timer);
/* Install the interrupt handler matching the current mode:
 * MSI gets a non-shared line (SA_SAMPLE_RANDOM only), while INTx is
 * registered shared (SA_SHIRQ).  The tagged-status variant of the
 * handler is selected when the chip supports tagged IRQ status.
 * Returns the request_irq() result.
 */
6696 static int tg3_request_irq(struct tg3 *tp)
6698 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6699 unsigned long flags;
6700 struct net_device *dev = tp->dev;
6702 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6704 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
/* MSI lines are exclusive to this device, so no SA_SHIRQ. */
6706 flags = SA_SAMPLE_RANDOM;
6709 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6710 fn = tg3_interrupt_tagged;
6711 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6713 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
/* Verify that the device can actually raise an interrupt on the
 * currently configured line: temporarily swap in a minimal test ISR,
 * coalesce-force an interrupt, and poll the interrupt mailbox for a
 * short while.  The normal handler is re-installed before returning.
 */
6716 static int tg3_test_interrupt(struct tg3 *tp)
6718 struct net_device *dev = tp->dev;
6722 if (!netif_running(dev))
6725 tg3_disable_ints(tp);
6727 free_irq(tp->pdev->irq, dev);
6729 err = request_irq(tp->pdev->irq, tg3_test_isr,
6730 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6734 tp->hw_status->status &= ~SD_STATUS_UPDATED;
6735 tg3_enable_ints(tp);
/* Force the host coalescing engine to fire an interrupt now. */
6737 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Poll briefly for the interrupt mailbox to show delivery. */
6740 for (i = 0; i < 5; i++) {
6741 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6748 tg3_disable_ints(tp);
6750 free_irq(tp->pdev->irq, dev);
/* Restore the production interrupt handler. */
6752 err = tg3_request_irq(tp);
6763 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6764 * successfully restored
6766 static int tg3_test_msi(struct tg3 *tp)
6768 struct net_device *dev = tp->dev;
/* Nothing to test unless MSI is actually in use. */
6772 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6775 /* Turn off SERR reporting in case MSI terminates with Master
6778 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6779 pci_write_config_word(tp->pdev, PCI_COMMAND,
6780 pci_cmd & ~PCI_COMMAND_SERR);
6782 err = tg3_test_interrupt(tp);
/* Restore the original PCI command word (re-enables SERR if it was on). */
6784 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6789 /* other failures */
6793 /* MSI test failed, go back to INTx mode */
6794 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6795 "switching to INTx mode. Please report this failure to "
6796 "the PCI maintainer and include system chipset information.\n",
6799 free_irq(tp->pdev->irq, dev);
6800 pci_disable_msi(tp->pdev);
6802 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Re-register the handler in INTx mode. */
6804 err = tg3_request_irq(tp);
6808 /* Need to reset the chip because the MSI cycle may have terminated
6809 * with Master Abort.
6811 tg3_full_lock(tp, 1);
6813 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6814 err = tg3_init_hw(tp, 1);
6816 tg3_full_unlock(tp);
6819 free_irq(tp->pdev->irq, dev);
/* net_device open() hook: power the chip up, allocate DMA-consistent
 * rings, optionally enable MSI (with a delivery self-test), initialize
 * the hardware, start the heartbeat timer, enable interrupts, and
 * start the transmit queue.  Error paths unwind IRQ/MSI/ring state.
 */
6824 static int tg3_open(struct net_device *dev)
6826 struct tg3 *tp = netdev_priv(dev);
6829 tg3_full_lock(tp, 0);
6831 err = tg3_set_power_state(tp, PCI_D0);
6835 tg3_disable_ints(tp);
6836 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6838 tg3_full_unlock(tp);
6840 /* The placement of this call is tied
6841 * to the setup and use of Host TX descriptors.
6843 err = tg3_alloc_consistent(tp);
/* MSI is only attempted on 5750+ parts, excluding 5750 A0/B0 steppings
 * and single-port 5714s (pdev_peer == pdev).
 */
6847 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6848 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6849 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6850 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6851 (tp->pdev_peer == tp->pdev))) {
6852 /* All MSI supporting chips should support tagged
6853 * status. Assert that this is the case.
6855 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6856 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6857 "Not using MSI.\n", tp->dev->name);
6858 } else if (pci_enable_msi(tp->pdev) == 0) {
6861 msi_mode = tr32(MSGINT_MODE);
6862 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6863 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6866 err = tg3_request_irq(tp);
/* request_irq failed: drop MSI (if enabled) and the rings. */
6869 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6870 pci_disable_msi(tp->pdev);
6871 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6873 tg3_free_consistent(tp);
6877 tg3_full_lock(tp, 0);
6879 err = tg3_init_hw(tp, 1);
6881 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Tagged status lets us poll at 1 Hz; otherwise poll at 10 Hz. */
6884 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6885 tp->timer_offset = HZ;
6887 tp->timer_offset = HZ / 10;
6889 BUG_ON(tp->timer_offset > HZ);
6890 tp->timer_counter = tp->timer_multiplier =
6891 (HZ / tp->timer_offset);
/* ASF heartbeat runs at half the once-per-second rate (every 2s). */
6892 tp->asf_counter = tp->asf_multiplier =
6893 ((HZ / tp->timer_offset) * 2);
6895 init_timer(&tp->timer);
6896 tp->timer.expires = jiffies + tp->timer_offset;
6897 tp->timer.data = (unsigned long) tp;
6898 tp->timer.function = tg3_timer;
6901 tg3_full_unlock(tp);
/* hw init failed: release IRQ, MSI and rings. */
6904 free_irq(tp->pdev->irq, dev);
6905 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6906 pci_disable_msi(tp->pdev);
6907 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6909 tg3_free_consistent(tp);
/* Verify MSI delivery actually works; tg3_test_msi() falls back to
 * INTx itself, so a non-zero return here is a fatal open failure.
 */
6913 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6914 err = tg3_test_msi(tp);
6917 tg3_full_lock(tp, 0);
6919 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6920 pci_disable_msi(tp->pdev);
6921 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6923 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6925 tg3_free_consistent(tp);
6927 tg3_full_unlock(tp);
/* 1-shot MSI: set the chip-specific enable bit (register 0x7c04,
 * bit 29) -- undocumented; see chip errata/vendor docs.
 */
6932 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6933 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6934 u32 val = tr32(0x7c04);
6936 tw32(0x7c04, val | (1 << 29));
6941 tg3_full_lock(tp, 0);
6943 add_timer(&tp->timer);
6944 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6945 tg3_enable_ints(tp);
6947 tg3_full_unlock(tp);
6949 netif_start_queue(dev);
/* Debug-only helper: dump PCI status, every MAC/DMA/host-coalescing
 * control block, selected NIC SRAM control blocks, the host status and
 * statistics blocks, mailbox producer indices, and the first few NIC-
 * side TX/RX/jumbo descriptors to the kernel log.  Read-only except
 * for the register reads themselves; intended to be called with the
 * device quiesced.
 */
6955 /*static*/ void tg3_dump_state(struct tg3 *tp)
6957 u32 val32, val32_2, val32_3, val32_4, val32_5;
6961 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6962 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6963 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6967 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6968 tr32(MAC_MODE), tr32(MAC_STATUS));
6969 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6970 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6971 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6972 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6973 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6974 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6976 /* Send data initiator control block */
6977 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6978 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6979 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6980 tr32(SNDDATAI_STATSCTRL));
6982 /* Send data completion control block */
6983 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6985 /* Send BD ring selector block */
6986 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6987 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6989 /* Send BD initiator control block */
6990 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6991 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6993 /* Send BD completion control block */
6994 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6996 /* Receive list placement control block */
6997 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6998 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6999 printk(" RCVLPC_STATSCTRL[%08x]\n",
7000 tr32(RCVLPC_STATSCTRL));
7002 /* Receive data and receive BD initiator control block */
7003 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7004 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7006 /* Receive data completion control block */
7007 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7010 /* Receive BD initiator control block */
7011 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7012 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7014 /* Receive BD completion control block */
7015 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7016 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7018 /* Receive list selector control block */
7019 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7020 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7022 /* Mbuf cluster free block */
7023 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7024 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7026 /* Host coalescing control block */
7027 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7028 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7029 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7030 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7031 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7032 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7033 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7034 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7035 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7036 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7037 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7038 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7040 /* Memory arbiter control block */
7041 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7042 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7044 /* Buffer manager control block */
7045 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7046 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7047 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7048 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7049 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7050 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7051 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7052 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7054 /* Read DMA control block */
7055 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7056 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7058 /* Write DMA control block */
7059 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7060 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7062 /* DMA completion block */
7063 printk("DEBUG: DMAC_MODE[%08x]\n",
7067 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7068 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7069 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7070 tr32(GRC_LOCAL_CTRL));
/* Receive BD ring control blocks (jumbo / standard / mini). */
7073 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7074 tr32(RCVDBDI_JUMBO_BD + 0x0),
7075 tr32(RCVDBDI_JUMBO_BD + 0x4),
7076 tr32(RCVDBDI_JUMBO_BD + 0x8),
7077 tr32(RCVDBDI_JUMBO_BD + 0xc));
7078 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7079 tr32(RCVDBDI_STD_BD + 0x0),
7080 tr32(RCVDBDI_STD_BD + 0x4),
7081 tr32(RCVDBDI_STD_BD + 0x8),
7082 tr32(RCVDBDI_STD_BD + 0xc));
7083 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7084 tr32(RCVDBDI_MINI_BD + 0x0),
7085 tr32(RCVDBDI_MINI_BD + 0x4),
7086 tr32(RCVDBDI_MINI_BD + 0x8),
7087 tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks kept in NIC SRAM. */
7089 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7090 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7091 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7092 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7093 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7094 val32, val32_2, val32_3, val32_4);
7096 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7097 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7098 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7099 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7100 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7101 val32, val32_2, val32_3, val32_4);
7103 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7104 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7105 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7106 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7107 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7108 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7109 val32, val32_2, val32_3, val32_4, val32_5);
7111 /* SW status block */
7112 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7113 tp->hw_status->status,
7114 tp->hw_status->status_tag,
7115 tp->hw_status->rx_jumbo_consumer,
7116 tp->hw_status->rx_consumer,
7117 tp->hw_status->rx_mini_consumer,
7118 tp->hw_status->idx[0].rx_producer,
7119 tp->hw_status->idx[0].tx_consumer);
7121 /* SW statistics block */
7122 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7123 ((u32 *)tp->hw_stats)[0],
7124 ((u32 *)tp->hw_stats)[1],
7125 ((u32 *)tp->hw_stats)[2],
7126 ((u32 *)tp->hw_stats)[3]);
/* Mailbox producer indices (host and NIC sides of the send ring). */
7129 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7130 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7131 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7132 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7133 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7135 /* NIC side send descriptors. */
7136 for (i = 0; i < 6; i++) {
7139 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7140 + (i * sizeof(struct tg3_tx_buffer_desc));
7141 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7143 readl(txd + 0x0), readl(txd + 0x4),
7144 readl(txd + 0x8), readl(txd + 0xc));
7147 /* NIC side RX descriptors. */
7148 for (i = 0; i < 6; i++) {
7151 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7152 + (i * sizeof(struct tg3_rx_buffer_desc));
7153 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7155 readl(rxd + 0x0), readl(rxd + 0x4),
7156 readl(rxd + 0x8), readl(rxd + 0xc));
7157 rxd += (4 * sizeof(u32));
7158 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7160 readl(rxd + 0x0), readl(rxd + 0x4),
7161 readl(rxd + 0x8), readl(rxd + 0xc));
/* NIC side jumbo RX descriptors. */
7164 for (i = 0; i < 6; i++) {
7167 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7168 + (i * sizeof(struct tg3_rx_buffer_desc));
7169 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7171 readl(rxd + 0x0), readl(rxd + 0x4),
7172 readl(rxd + 0x8), readl(rxd + 0xc));
7173 rxd += (4 * sizeof(u32));
7174 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7176 readl(rxd + 0x0), readl(rxd + 0x4),
7177 readl(rxd + 0x8), readl(rxd + 0xc));
7182 static struct net_device_stats *tg3_get_stats(struct net_device *);
7183 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop() hook: wait for any in-flight reset task, stop the
 * queue and heartbeat timer, halt the chip, release the IRQ (and MSI),
 * snapshot statistics so they survive the ring teardown, free the DMA
 * rings, and drop the device to D3hot.
 */
7185 static int tg3_close(struct net_device *dev)
7187 struct tg3 *tp = netdev_priv(dev);
7189 /* Calling flush_scheduled_work() may deadlock because
7190 * linkwatch_event() may be on the workqueue and it will try to get
7191 * the rtnl_lock which we are holding.
7193 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7196 netif_stop_queue(dev);
7198 del_timer_sync(&tp->timer);
7200 tg3_full_lock(tp, 1);
7205 tg3_disable_ints(tp);
7207 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7210 ~(TG3_FLAG_INIT_COMPLETE |
7211 TG3_FLAG_GOT_SERDES_FLOWCTL);
7213 tg3_full_unlock(tp);
7215 free_irq(tp->pdev->irq, dev);
7216 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7217 pci_disable_msi(tp->pdev);
7218 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve the accumulated statistics across close/open cycles;
 * tg3_get_stats()/tg3_get_estats() add these back in later.
 */
7221 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7222 sizeof(tp->net_stats_prev));
7223 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7224 sizeof(tp->estats_prev));
7226 tg3_free_consistent(tp);
7228 tg3_set_power_state(tp, PCI_D3hot);
7230 netif_carrier_off(tp->dev);
/* Convert a tg3 64-bit hardware statistic (high/low u32 pair) into an
 * unsigned long.  On 32-bit kernels the combined value is assembled
 * here; NOTE(review): the unsigned long return presumably truncates it
 * to the low 32 bits on such kernels -- confirm against full source.
 */
7235 static inline unsigned long get_stat64(tg3_stat64_t *val)
7239 #if (BITS_PER_LONG == 32)
7242 ret = ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On copper 5700/5701 the
 * MAC statistic is unreliable, so the PHY's CRC counter (shadow
 * register 0x14, exposed via register 0x1e bit 15) is read and
 * accumulated into tp->phy_crc_errors under tp->lock; all other chips
 * use the hardware rx_fcs_errors statistic.
 */
7247 static unsigned long calc_crc_errors(struct tg3 *tp)
7249 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7251 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7252 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7256 spin_lock_bh(&tp->lock);
7257 if (!tg3_readphy(tp, 0x1e, &val)) {
7258 tg3_writephy(tp, 0x1e, val | 0x8000);
7259 tg3_readphy(tp, 0x14, &val);
7262 spin_unlock_bh(&tp->lock);
7264 tp->phy_crc_errors += val;
7266 return tp->phy_crc_errors;
7269 return get_stat64(&hw_stats->rx_fcs_errors);
/* estats->member = snapshot taken at last close (old_estats) plus the
 * current hardware counter -- keeps ethtool stats monotonic across
 * close/open cycles.
 */
7272 #define ESTAT_ADD(member) \
7273 estats->member = old_estats->member + \
7274 get_stat64(&hw_stats->member)
/* Build the full ethtool statistics block in tp->estats by adding the
 * live hardware counters onto the snapshot preserved at last close
 * (tp->estats_prev).  Returns a pointer to tp->estats.
 */
7276 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7278 struct tg3_ethtool_stats *estats = &tp->estats;
7279 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7280 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7285 ESTAT_ADD(rx_octets);
7286 ESTAT_ADD(rx_fragments);
7287 ESTAT_ADD(rx_ucast_packets);
7288 ESTAT_ADD(rx_mcast_packets);
7289 ESTAT_ADD(rx_bcast_packets);
7290 ESTAT_ADD(rx_fcs_errors);
7291 ESTAT_ADD(rx_align_errors);
7292 ESTAT_ADD(rx_xon_pause_rcvd);
7293 ESTAT_ADD(rx_xoff_pause_rcvd);
7294 ESTAT_ADD(rx_mac_ctrl_rcvd);
7295 ESTAT_ADD(rx_xoff_entered);
7296 ESTAT_ADD(rx_frame_too_long_errors);
7297 ESTAT_ADD(rx_jabbers);
7298 ESTAT_ADD(rx_undersize_packets);
7299 ESTAT_ADD(rx_in_length_errors);
7300 ESTAT_ADD(rx_out_length_errors);
7301 ESTAT_ADD(rx_64_or_less_octet_packets);
7302 ESTAT_ADD(rx_65_to_127_octet_packets);
7303 ESTAT_ADD(rx_128_to_255_octet_packets);
7304 ESTAT_ADD(rx_256_to_511_octet_packets);
7305 ESTAT_ADD(rx_512_to_1023_octet_packets);
7306 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7307 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7308 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7309 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7310 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7312 ESTAT_ADD(tx_octets);
7313 ESTAT_ADD(tx_collisions);
7314 ESTAT_ADD(tx_xon_sent);
7315 ESTAT_ADD(tx_xoff_sent);
7316 ESTAT_ADD(tx_flow_control);
7317 ESTAT_ADD(tx_mac_errors);
7318 ESTAT_ADD(tx_single_collisions);
7319 ESTAT_ADD(tx_mult_collisions);
7320 ESTAT_ADD(tx_deferred);
7321 ESTAT_ADD(tx_excessive_collisions);
7322 ESTAT_ADD(tx_late_collisions);
7323 ESTAT_ADD(tx_collide_2times);
7324 ESTAT_ADD(tx_collide_3times);
7325 ESTAT_ADD(tx_collide_4times);
7326 ESTAT_ADD(tx_collide_5times);
7327 ESTAT_ADD(tx_collide_6times);
7328 ESTAT_ADD(tx_collide_7times);
7329 ESTAT_ADD(tx_collide_8times);
7330 ESTAT_ADD(tx_collide_9times);
7331 ESTAT_ADD(tx_collide_10times);
7332 ESTAT_ADD(tx_collide_11times);
7333 ESTAT_ADD(tx_collide_12times);
7334 ESTAT_ADD(tx_collide_13times);
7335 ESTAT_ADD(tx_collide_14times);
7336 ESTAT_ADD(tx_collide_15times);
7337 ESTAT_ADD(tx_ucast_packets);
7338 ESTAT_ADD(tx_mcast_packets);
7339 ESTAT_ADD(tx_bcast_packets);
7340 ESTAT_ADD(tx_carrier_sense_errors);
7341 ESTAT_ADD(tx_discards);
7342 ESTAT_ADD(tx_errors);
7344 ESTAT_ADD(dma_writeq_full);
7345 ESTAT_ADD(dma_write_prioq_full);
7346 ESTAT_ADD(rxbds_empty);
7347 ESTAT_ADD(rx_discards);
7348 ESTAT_ADD(rx_errors);
7349 ESTAT_ADD(rx_threshold_hit);
7351 ESTAT_ADD(dma_readq_full);
7352 ESTAT_ADD(dma_read_prioq_full);
7353 ESTAT_ADD(tx_comp_queue_full);
7355 ESTAT_ADD(ring_set_send_prod_index);
7356 ESTAT_ADD(ring_status_update);
7357 ESTAT_ADD(nic_irqs);
7358 ESTAT_ADD(nic_avoided_irqs);
7359 ESTAT_ADD(nic_tx_threshold_hit);
/* net_device get_stats() hook: map the tg3 hardware statistics onto
 * the generic net_device_stats fields, adding the snapshot preserved
 * from the previous open (tp->net_stats_prev) so counters stay
 * monotonic across close/open cycles.
 */
7364 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7366 struct tg3 *tp = netdev_priv(dev);
7367 struct net_device_stats *stats = &tp->net_stats;
7368 struct net_device_stats *old_stats = &tp->net_stats_prev;
7369 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7374 stats->rx_packets = old_stats->rx_packets +
7375 get_stat64(&hw_stats->rx_ucast_packets) +
7376 get_stat64(&hw_stats->rx_mcast_packets) +
7377 get_stat64(&hw_stats->rx_bcast_packets);
7379 stats->tx_packets = old_stats->tx_packets +
7380 get_stat64(&hw_stats->tx_ucast_packets) +
7381 get_stat64(&hw_stats->tx_mcast_packets) +
7382 get_stat64(&hw_stats->tx_bcast_packets);
7384 stats->rx_bytes = old_stats->rx_bytes +
7385 get_stat64(&hw_stats->rx_octets);
7386 stats->tx_bytes = old_stats->tx_bytes +
7387 get_stat64(&hw_stats->tx_octets);
7389 stats->rx_errors = old_stats->rx_errors +
7390 get_stat64(&hw_stats->rx_errors);
7391 stats->tx_errors = old_stats->tx_errors +
7392 get_stat64(&hw_stats->tx_errors) +
7393 get_stat64(&hw_stats->tx_mac_errors) +
7394 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7395 get_stat64(&hw_stats->tx_discards);
7397 stats->multicast = old_stats->multicast +
7398 get_stat64(&hw_stats->rx_mcast_packets);
7399 stats->collisions = old_stats->collisions +
7400 get_stat64(&hw_stats->tx_collisions);
/* Both oversize and undersize frames count as length errors. */
7402 stats->rx_length_errors = old_stats->rx_length_errors +
7403 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7404 get_stat64(&hw_stats->rx_undersize_packets);
7406 stats->rx_over_errors = old_stats->rx_over_errors +
7407 get_stat64(&hw_stats->rxbds_empty);
7408 stats->rx_frame_errors = old_stats->rx_frame_errors +
7409 get_stat64(&hw_stats->rx_align_errors);
7410 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7411 get_stat64(&hw_stats->tx_discards);
7412 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7413 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors come from the PHY on 5700/5701; see calc_crc_errors(). */
7415 stats->rx_crc_errors = old_stats->rx_crc_errors +
7416 calc_crc_errors(tp);
7418 stats->rx_missed_errors = old_stats->rx_missed_errors +
7419 get_stat64(&hw_stats->rx_discards);
/* Bit-serial CRC over buf[0..len-1]; the result selects a bit in the
 * 128-bit multicast hash filter (see __tg3_set_rx_mode()).
 * NOTE(review): presumably the standard Ethernet CRC-32 polynomial --
 * confirm against the full function body.
 */
7424 static inline u32 calc_crc(unsigned char *buf, int len)
7432 for (j = 0; j < len; j++) {
7435 for (k = 0; k < 8; k++) {
/* Program the 128-bit multicast hash filter to accept everything
 * (all four 32-bit hash registers set to all-ones) or nothing (all
 * zero), depending on accept_all.
 */
7449 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7451 /* accept or reject all multicast frames */
7452 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7453 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7454 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7455 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Program RX filtering from dev->flags and the multicast list:
 * promiscuous mode, VLAN tag retention (forced on when ASF firmware
 * is active, since the firmware needs intact frames), and the 128-bit
 * multicast hash filter.  Callers hold the device locks
 * (tg3_full_lock or equivalent -- see tg3_set_rx_mode()).
 */
7458 static void __tg3_set_rx_mode(struct net_device *dev)
7460 struct tg3 *tp = netdev_priv(dev);
7463 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7464 RX_MODE_KEEP_VLAN_TAG);
7466 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7469 #if TG3_VLAN_TAG_USED
7471 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7472 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7474 /* By definition, VLAN is disabled always in this
7477 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7478 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7481 if (dev->flags & IFF_PROMISC) {
7482 /* Promiscuous mode. */
7483 rx_mode |= RX_MODE_PROMISC;
7484 } else if (dev->flags & IFF_ALLMULTI) {
7485 /* Accept all multicast. */
7486 tg3_set_multi (tp, 1);
7487 } else if (dev->mc_count < 1) {
7488 /* Reject all multicast. */
7489 tg3_set_multi (tp, 0);
7491 /* Accept one or more multicast(s). */
7492 struct dev_mc_list *mclist;
7494 u32 mc_filter[4] = { 0, };
/* Hash each multicast address: the CRC selects one of 128 filter
 * bits; bits 5-6 of the hash pick the register, low 5 bits the bit.
 */
7499 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7500 i++, mclist = mclist->next) {
7502 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7504 regidx = (bit & 0x60) >> 5;
7506 mc_filter[regidx] |= (1 << bit);
7509 tw32(MAC_HASH_REG_0, mc_filter[0]);
7510 tw32(MAC_HASH_REG_1, mc_filter[1]);
7511 tw32(MAC_HASH_REG_2, mc_filter[2]);
7512 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the RX mode register if something actually changed. */
7515 if (rx_mode != tp->rx_mode) {
7516 tp->rx_mode = rx_mode;
7517 tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_multicast_list() hook: take the device locks and
 * apply the RX filter configuration.  No-op while the device is down.
 */
7522 static void tg3_set_rx_mode(struct net_device *dev)
7524 struct tg3 *tp = netdev_priv(dev);
7526 if (!netif_running(dev))
7529 tg3_full_lock(tp, 0);
7530 __tg3_set_rx_mode(dev);
7531 tg3_full_unlock(tp);
/* Size in bytes of the ethtool register dump buffer. */
7534 #define TG3_REGDUMP_LEN (32 * 1024)
/* ethtool get_regs_len() hook: fixed-size dump for all chips. */
7536 static int tg3_get_regs_len(struct net_device *dev)
7538 return TG3_REGDUMP_LEN;
/* ethtool get_regs() hook: copy selected register ranges into the
 * caller's TG3_REGDUMP_LEN buffer at their native offsets (unread
 * gaps are left zeroed by the initial memset).  Skipped when the PHY
 * is in low-power mode, since register reads would be unreliable.
 */
7541 static void tg3_get_regs(struct net_device *dev,
7542 struct ethtool_regs *regs, void *_p)
7545 struct tg3 *tp = netdev_priv(dev);
7551 memset(p, 0, TG3_REGDUMP_LEN);
7553 if (tp->link_config.phy_is_low_power)
7556 tg3_full_lock(tp, 0);
/* __GET_REG32 reads one register into the dump; GET_REG32_LOOP reads a
 * contiguous range of len bytes starting at base; GET_REG32_1 reads a
 * single register at its own offset.  All position the output pointer
 * so each value lands at its register offset within the buffer.
 */
7558 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7559 #define GET_REG32_LOOP(base,len) \
7560 do { p = (u32 *)(orig_p + (base)); \
7561 for (i = 0; i < len; i += 4) \
7562 __GET_REG32((base) + i); \
7564 #define GET_REG32_1(reg) \
7565 do { p = (u32 *)(orig_p + (reg)); \
7566 __GET_REG32((reg)); \
7569 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7570 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7571 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7572 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7573 GET_REG32_1(SNDDATAC_MODE);
7574 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7575 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7576 GET_REG32_1(SNDBDC_MODE);
7577 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7578 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7579 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7580 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7581 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7582 GET_REG32_1(RCVDCC_MODE);
7583 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7584 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7585 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7586 GET_REG32_1(MBFREE_MODE);
7587 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7588 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7589 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7590 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7591 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7592 GET_REG32_1(RX_CPU_MODE);
7593 GET_REG32_1(RX_CPU_STATE);
7594 GET_REG32_1(RX_CPU_PGMCTR);
7595 GET_REG32_1(RX_CPU_HWBKPT);
7596 GET_REG32_1(TX_CPU_MODE);
7597 GET_REG32_1(TX_CPU_STATE);
7598 GET_REG32_1(TX_CPU_PGMCTR);
7599 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7600 GET_REG32_LOOP(FTQ_RESET, 0x120);
7601 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7602 GET_REG32_1(DMAC_MODE);
7603 GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers only exist (usefully) when flash/NVRAM is present. */
7604 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7605 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7608 #undef GET_REG32_LOOP
7611 tg3_full_unlock(tp);
/* ethtool get_eeprom_len() hook: total NVRAM size probed at attach. */
7614 static int tg3_get_eeprom_len(struct net_device *dev)
7616 struct tg3 *tp = netdev_priv(dev);
7618 return tp->nvram_size;
7621 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7622 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
/* ethtool get_eeprom() hook: read eeprom->len bytes of NVRAM at
 * eeprom->offset into data.  NVRAM reads are word (4-byte) granular,
 * so the request is split into an unaligned head, aligned middle, and
 * unaligned tail; words are converted via cpu_to_le32 so the byte
 * order in data matches the on-NVRAM layout.  Refused while the PHY
 * is in low-power mode.
 */
7624 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7626 struct tg3 *tp = netdev_priv(dev);
7629 u32 i, offset, len, val, b_offset, b_count;
7631 if (tp->link_config.phy_is_low_power)
7634 offset = eeprom->offset;
7638 eeprom->magic = TG3_EEPROM_MAGIC;
7641 /* adjustments to start on required 4 byte boundary */
7642 b_offset = offset & 3;
7643 b_count = 4 - b_offset;
7644 if (b_count > len) {
7645 /* i.e. offset=1 len=2 */
7648 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7651 val = cpu_to_le32(val);
7652 memcpy(data, ((char*)&val) + b_offset, b_count);
7655 eeprom->len += b_count;
7658 /* read bytes upto the last 4 byte boundary */
7659 pd = &data[eeprom->len];
7660 for (i = 0; i < (len - (len & 3)); i += 4) {
7661 ret = tg3_nvram_read(tp, offset + i, &val);
7666 val = cpu_to_le32(val);
7667 memcpy(pd + i, &val, 4);
7672 /* read last bytes not ending on 4 byte boundary */
7673 pd = &data[eeprom->len];
7675 b_offset = offset + len - b_count;
7676 ret = tg3_nvram_read(tp, b_offset, &val);
7679 val = cpu_to_le32(val);
7680 memcpy(pd, ((char*)&val), b_count);
7681 eeprom->len += b_count;
7686 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom() hook: write eeprom->len bytes of data to NVRAM
 * at eeprom->offset.  Writes are word-granular, so when the request is
 * unaligned at either end the bordering words are read-modify-written
 * via a temporary buffer.  Refused in PHY low-power mode or when the
 * caller's magic does not match TG3_EEPROM_MAGIC.
 */
7688 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7690 struct tg3 *tp = netdev_priv(dev);
7692 u32 offset, len, b_offset, odd_len, start, end;
7695 if (tp->link_config.phy_is_low_power)
7698 if (eeprom->magic != TG3_EEPROM_MAGIC)
7701 offset = eeprom->offset;
7704 if ((b_offset = (offset & 3))) {
7705 /* adjustments to start on required 4 byte boundary */
7706 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7709 start = cpu_to_le32(start);
7718 /* adjustments to end on required 4 byte boundary */
7720 len = (len + 3) & ~3;
7721 ret = tg3_nvram_read(tp, offset+len-4, &end);
7724 end = cpu_to_le32(end);
/* Unaligned request: build a word-aligned image combining the
 * preserved boundary words with the caller's data.
 */
7728 if (b_offset || odd_len) {
7729 buf = kmalloc(len, GFP_KERNEL);
7733 memcpy(buf, &start, 4);
7735 memcpy(buf+len-4, &end, 4);
7736 memcpy(buf + b_offset, data, eeprom->len);
7739 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings callback: report supported/advertised link
 * modes, port type, and (when the interface is up) the active
 * speed/duplex.  Serdes parts are reported as FIBRE, copper as TP.
 * NOTE(review): SUPPORTED_TP (line 7762), the serdes 'else', and the
 * 'return 0' are elided in this excerpt.
 */
7747 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7749 struct tg3 *tp = netdev_priv(dev);
7751 cmd->supported = (SUPPORTED_Autoneg);
/* Gigabit modes only when the chip is not a 10/100-only variant. */
7753 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7754 cmd->supported |= (SUPPORTED_1000baseT_Half |
7755 SUPPORTED_1000baseT_Full);
7757 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7758 cmd->supported |= (SUPPORTED_100baseT_Half |
7759 SUPPORTED_100baseT_Full |
7760 SUPPORTED_10baseT_Half |
7761 SUPPORTED_10baseT_Full |
7763 cmd->port = PORT_TP;
7765 cmd->supported |= SUPPORTED_FIBRE;
7766 cmd->port = PORT_FIBRE;
7769 cmd->advertising = tp->link_config.advertising;
/* active_speed/duplex are only meaningful while the device is up. */
7770 if (netif_running(dev)) {
7771 cmd->speed = tp->link_config.active_speed;
7772 cmd->duplex = tp->link_config.active_duplex;
7774 cmd->phy_address = PHY_ADDR;
7775 cmd->transceiver = 0;
7776 cmd->autoneg = tp->link_config.autoneg;
7782 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7784 struct tg3 *tp = netdev_priv(dev);
7786 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7787 /* These are the only valid advertisement bits allowed. */
7788 if (cmd->autoneg == AUTONEG_ENABLE &&
7789 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7790 ADVERTISED_1000baseT_Full |
7791 ADVERTISED_Autoneg |
7794 /* Fiber can only do SPEED_1000. */
7795 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7796 (cmd->speed != SPEED_1000))
7798 /* Copper cannot force SPEED_1000. */
7799 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7800 (cmd->speed == SPEED_1000))
7802 else if ((cmd->speed == SPEED_1000) &&
7803 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7806 tg3_full_lock(tp, 0);
7808 tp->link_config.autoneg = cmd->autoneg;
7809 if (cmd->autoneg == AUTONEG_ENABLE) {
7810 tp->link_config.advertising = cmd->advertising;
7811 tp->link_config.speed = SPEED_INVALID;
7812 tp->link_config.duplex = DUPLEX_INVALID;
7814 tp->link_config.advertising = 0;
7815 tp->link_config.speed = cmd->speed;
7816 tp->link_config.duplex = cmd->duplex;
7819 if (netif_running(dev))
7820 tg3_setup_phy(tp, 1);
7822 tg3_full_unlock(tp);
7827 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7829 struct tg3 *tp = netdev_priv(dev);
7831 strcpy(info->driver, DRV_MODULE_NAME);
7832 strcpy(info->version, DRV_MODULE_VERSION);
7833 strcpy(info->fw_version, tp->fw_ver);
7834 strcpy(info->bus_info, pci_name(tp->pdev));
7837 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7839 struct tg3 *tp = netdev_priv(dev);
7841 wol->supported = WAKE_MAGIC;
7843 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7844 wol->wolopts = WAKE_MAGIC;
7845 memset(&wol->sopass, 0, sizeof(wol->sopass));
7848 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7850 struct tg3 *tp = netdev_priv(dev);
7852 if (wol->wolopts & ~WAKE_MAGIC)
7854 if ((wol->wolopts & WAKE_MAGIC) &&
7855 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7856 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7859 spin_lock_bh(&tp->lock);
7860 if (wol->wolopts & WAKE_MAGIC)
7861 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7863 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7864 spin_unlock_bh(&tp->lock);
7869 static u32 tg3_get_msglevel(struct net_device *dev)
7871 struct tg3 *tp = netdev_priv(dev);
7872 return tp->msg_enable;
7875 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7877 struct tg3 *tp = netdev_priv(dev);
7878 tp->msg_enable = value;
7881 #if TG3_TSO_SUPPORT != 0
7882 static int tg3_set_tso(struct net_device *dev, u32 value)
7884 struct tg3 *tp = netdev_priv(dev);
7886 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7891 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset callback: restart PHY autonegotiation by setting
 * BMCR_ANRESTART.  Only valid while the interface is up and the PHY is
 * not a serdes.  NOTE(review): error returns and the 'r' result
 * variable are elided in this excerpt.
 */
7895 static int tg3_nway_reset(struct net_device *dev)
7897 struct tg3 *tp = netdev_priv(dev);
7901 if (!netif_running(dev))
7904 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7907 spin_lock_bh(&tp->lock);
/* NOTE(review): BMCR is read twice; presumably the first read flushes
 * a stale/latched value before the checked read — confirm against the
 * full driver history.
 */
7909 tg3_readphy(tp, MII_BMCR, &bmcr);
7910 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7911 ((bmcr & BMCR_ANENABLE) ||
7912 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7913 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7917 spin_unlock_bh(&tp->lock);
7922 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7924 struct tg3 *tp = netdev_priv(dev);
7926 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7927 ering->rx_mini_max_pending = 0;
7928 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7929 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7931 ering->rx_jumbo_max_pending = 0;
7933 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7935 ering->rx_pending = tp->rx_pending;
7936 ering->rx_mini_pending = 0;
7937 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7938 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7940 ering->rx_jumbo_pending = 0;
7942 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam callback: validate and apply new ring sizes,
 * restarting the chip if the interface is running.  NOTE(review): the
 * netif-stop/irq_sync setup, tg3_init_hw call and 'return' lines are
 * elided in this excerpt.
 */
7945 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7947 struct tg3 *tp = netdev_priv(dev);
/* Reject requests larger than the fixed hardware ring sizes. */
7950 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7951 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7952 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7955 if (netif_running(dev)) {
7960 tg3_full_lock(tp, irq_sync);
7962 tp->rx_pending = ering->rx_pending;
/* Some chips (5788 class) cap the standard rx ring at 64 entries. */
7964 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7965 tp->rx_pending > 63)
7966 tp->rx_pending = 63;
7967 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7968 tp->tx_pending = ering->tx_pending;
/* Ring sizes only take effect across a halt/re-init cycle. */
7970 if (netif_running(dev)) {
7971 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7973 tg3_netif_start(tp);
7976 tg3_full_unlock(tp);
7981 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7983 struct tg3 *tp = netdev_priv(dev);
7985 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7986 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7987 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam callback: update the pause-autoneg and rx/tx
 * flow-control flags, restarting the chip if the interface is running.
 * NOTE(review): the netif-stop/irq_sync setup, 'else' keywords,
 * tg3_init_hw call and 'return' are elided in this excerpt.
 */
7990 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7992 struct tg3 *tp = netdev_priv(dev);
7995 if (netif_running(dev)) {
8000 tg3_full_lock(tp, irq_sync);
8002 if (epause->autoneg)
8003 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8005 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8006 if (epause->rx_pause)
8007 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8009 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8010 if (epause->tx_pause)
8011 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8013 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
/* New pause settings take effect across a halt/re-init cycle. */
8015 if (netif_running(dev)) {
8016 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8018 tg3_netif_start(tp);
8021 tg3_full_unlock(tp);
8026 static u32 tg3_get_rx_csum(struct net_device *dev)
8028 struct tg3 *tp = netdev_priv(dev);
8029 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8032 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8034 struct tg3 *tp = netdev_priv(dev);
8036 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8042 spin_lock_bh(&tp->lock);
8044 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8046 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8047 spin_unlock_bh(&tp->lock);
8052 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8054 struct tg3 *tp = netdev_priv(dev);
8056 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8064 ethtool_op_set_tx_hw_csum(dev, data);
8066 ethtool_op_set_tx_csum(dev, data);
8071 static int tg3_get_stats_count (struct net_device *dev)
8073 return TG3_NUM_STATS;
8076 static int tg3_get_test_count (struct net_device *dev)
8078 return TG3_NUM_TEST;
8081 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8083 switch (stringset) {
8085 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
8088 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
8091 WARN_ON(1); /* we need a WARN() */
/* ethtool phys_id callback: blink the port LEDs for 'data' seconds so
 * the operator can locate the adapter, then restore the saved LED
 * control value.  NOTE(review): the data==0 default-duration line,
 * the alternating even/odd 'if', and the return are elided here.
 */
8096 static int tg3_phys_id(struct net_device *dev, u32 data)
8098 struct tg3 *tp = netdev_priv(dev);
8101 if (!netif_running(tp->dev))
/* Two half-second phases per second: all LEDs on, then all off. */
8107 for (i = 0; i < (data * 2); i++) {
8109 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8110 LED_CTRL_1000MBPS_ON |
8111 LED_CTRL_100MBPS_ON |
8112 LED_CTRL_10MBPS_ON |
8113 LED_CTRL_TRAFFIC_OVERRIDE |
8114 LED_CTRL_TRAFFIC_BLINK |
8115 LED_CTRL_TRAFFIC_LED);
8118 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8119 LED_CTRL_TRAFFIC_OVERRIDE);
/* Abort early if the sleeping blink loop is interrupted by a signal. */
8121 if (msleep_interruptible(500))
8124 tw32(MAC_LED_CTRL, tp->led_ctrl);
8128 static void tg3_get_ethtool_stats (struct net_device *dev,
8129 struct ethtool_stats *estats, u64 *tmp_stats)
8131 struct tg3 *tp = netdev_priv(dev);
8132 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8135 #define NVRAM_TEST_SIZE 0x100
8136 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
/* Self-test: read the first NVRAM block and verify its checksums.
 * Two layouts are handled: the self-boot format (simple 8-bit sum) and
 * the standard format (CRC over the bootstrap header and manufacturing
 * block).  NOTE(review): several error returns, the kfree/out path and
 * some loop bodies are elided in this excerpt.
 */
8138 static int tg3_test_nvram(struct tg3 *tp)
8140 u32 *buf, csum, magic;
8141 int i, j, err = 0, size;
8143 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
/* Choose how much to read based on the magic/format signature. */
8146 if (magic == TG3_EEPROM_MAGIC)
8147 size = NVRAM_TEST_SIZE;
8148 else if ((magic & 0xff000000) == 0xa5000000) {
8149 if ((magic & 0xe00000) == 0x200000)
8150 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8156 buf = kmalloc(size, GFP_KERNEL);
8161 for (i = 0, j = 0; i < size; i += 4, j++) {
8164 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8166 buf[j] = cpu_to_le32(val);
8171 /* Selfboot format */
8172 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8173 u8 *buf8 = (u8 *) buf, csum8 = 0;
/* Self-boot images carry a simple byte-sum checksum. */
8175 for (i = 0; i < size; i++)
8187 /* Bootstrap checksum at offset 0x10 */
8188 csum = calc_crc((unsigned char *) buf, 0x10);
8189 if(csum != cpu_to_le32(buf[0x10/4]))
8192 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8193 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8194 if (csum != cpu_to_le32(buf[0xfc/4]))
8204 #define TG3_SERDES_TIMEOUT_SEC 2
8205 #define TG3_COPPER_TIMEOUT_SEC 6
8207 static int tg3_test_link(struct tg3 *tp)
8211 if (!netif_running(tp->dev))
8214 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8215 max = TG3_SERDES_TIMEOUT_SEC;
8217 max = TG3_COPPER_TIMEOUT_SEC;
8219 for (i = 0; i < max; i++) {
8220 if (netif_carrier_ok(tp->dev))
8223 if (msleep_interruptible(1000))
/* Self-test: for each entry in reg_tbl, save the register, write 0 and
 * then all testable bits, verifying that read-only bits (read_mask)
 * never change and read/write bits (write_mask) take both values, then
 * restore the saved value.  Table entries are filtered by chip family
 * flags.  NOTE(review): struct/table braces, 'is_5705' setup, the
 * 'continue' statements and the final returns are elided here.
 */
8230 /* Only test the commonly used registers */
8231 static int tg3_test_registers(struct tg3 *tp)
8234 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry applicability flags: which chip families an entry is for. */
8238 #define TG3_FL_5705 0x1
8239 #define TG3_FL_NOT_5705 0x2
8240 #define TG3_FL_NOT_5788 0x4
8244 /* MAC Control Registers */
8245 { MAC_MODE, TG3_FL_NOT_5705,
8246 0x00000000, 0x00ef6f8c },
8247 { MAC_MODE, TG3_FL_5705,
8248 0x00000000, 0x01ef6b8c },
8249 { MAC_STATUS, TG3_FL_NOT_5705,
8250 0x03800107, 0x00000000 },
8251 { MAC_STATUS, TG3_FL_5705,
8252 0x03800100, 0x00000000 },
8253 { MAC_ADDR_0_HIGH, 0x0000,
8254 0x00000000, 0x0000ffff },
8255 { MAC_ADDR_0_LOW, 0x0000,
8256 0x00000000, 0xffffffff },
8257 { MAC_RX_MTU_SIZE, 0x0000,
8258 0x00000000, 0x0000ffff },
8259 { MAC_TX_MODE, 0x0000,
8260 0x00000000, 0x00000070 },
8261 { MAC_TX_LENGTHS, 0x0000,
8262 0x00000000, 0x00003fff },
8263 { MAC_RX_MODE, TG3_FL_NOT_5705,
8264 0x00000000, 0x000007fc },
8265 { MAC_RX_MODE, TG3_FL_5705,
8266 0x00000000, 0x000007dc },
8267 { MAC_HASH_REG_0, 0x0000,
8268 0x00000000, 0xffffffff },
8269 { MAC_HASH_REG_1, 0x0000,
8270 0x00000000, 0xffffffff },
8271 { MAC_HASH_REG_2, 0x0000,
8272 0x00000000, 0xffffffff },
8273 { MAC_HASH_REG_3, 0x0000,
8274 0x00000000, 0xffffffff },
8276 /* Receive Data and Receive BD Initiator Control Registers. */
8277 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8278 0x00000000, 0xffffffff },
8279 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8280 0x00000000, 0xffffffff },
8281 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8282 0x00000000, 0x00000003 },
8283 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8284 0x00000000, 0xffffffff },
8285 { RCVDBDI_STD_BD+0, 0x0000,
8286 0x00000000, 0xffffffff },
8287 { RCVDBDI_STD_BD+4, 0x0000,
8288 0x00000000, 0xffffffff },
8289 { RCVDBDI_STD_BD+8, 0x0000,
8290 0x00000000, 0xffff0002 },
8291 { RCVDBDI_STD_BD+0xc, 0x0000,
8292 0x00000000, 0xffffffff },
8294 /* Receive BD Initiator Control Registers. */
8295 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8296 0x00000000, 0xffffffff },
8297 { RCVBDI_STD_THRESH, TG3_FL_5705,
8298 0x00000000, 0x000003ff },
8299 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8300 0x00000000, 0xffffffff },
8302 /* Host Coalescing Control Registers. */
8303 { HOSTCC_MODE, TG3_FL_NOT_5705,
8304 0x00000000, 0x00000004 },
8305 { HOSTCC_MODE, TG3_FL_5705,
8306 0x00000000, 0x000000f6 },
8307 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8308 0x00000000, 0xffffffff },
8309 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8310 0x00000000, 0x000003ff },
8311 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8312 0x00000000, 0xffffffff },
8313 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8314 0x00000000, 0x000003ff },
8315 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8316 0x00000000, 0xffffffff },
8317 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8318 0x00000000, 0x000000ff },
8319 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8320 0x00000000, 0xffffffff },
8321 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8322 0x00000000, 0x000000ff },
8323 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8324 0x00000000, 0xffffffff },
8325 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8326 0x00000000, 0xffffffff },
8327 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8328 0x00000000, 0xffffffff },
8329 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8330 0x00000000, 0x000000ff },
8331 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8332 0x00000000, 0xffffffff },
8333 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8334 0x00000000, 0x000000ff },
8335 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8336 0x00000000, 0xffffffff },
8337 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8338 0x00000000, 0xffffffff },
8339 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8340 0x00000000, 0xffffffff },
8341 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8342 0x00000000, 0xffffffff },
8343 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8344 0x00000000, 0xffffffff },
8345 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8346 0xffffffff, 0x00000000 },
8347 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8348 0xffffffff, 0x00000000 },
8350 /* Buffer Manager Control Registers. */
8351 { BUFMGR_MB_POOL_ADDR, 0x0000,
8352 0x00000000, 0x007fff80 },
8353 { BUFMGR_MB_POOL_SIZE, 0x0000,
8354 0x00000000, 0x007fffff },
8355 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8356 0x00000000, 0x0000003f },
8357 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8358 0x00000000, 0x000001ff },
8359 { BUFMGR_MB_HIGH_WATER, 0x0000,
8360 0x00000000, 0x000001ff },
8361 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8362 0xffffffff, 0x00000000 },
8363 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8364 0xffffffff, 0x00000000 },
8366 /* Mailbox Registers */
8367 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8368 0x00000000, 0x000001ff },
8369 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8370 0x00000000, 0x000001ff },
8371 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8372 0x00000000, 0x000007ff },
8373 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8374 0x00000000, 0x000001ff },
/* Table terminator: offset 0xffff ends the scan loop below. */
8376 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8379 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8384 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
/* Skip entries that do not apply to this chip family. */
8385 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8388 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8391 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8392 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8395 offset = (u32) reg_tbl[i].offset;
8396 read_mask = reg_tbl[i].read_mask;
8397 write_mask = reg_tbl[i].write_mask;
8399 /* Save the original register content */
8400 save_val = tr32(offset);
8402 /* Determine the read-only value. */
8403 read_val = save_val & read_mask;
8405 /* Write zero to the register, then make sure the read-only bits
8406 * are not changed and the read/write bits are all zeros.
/* NOTE(review): the tw32(offset, 0) / val = tr32(offset) pair is
 * elided between here and the check below — confirm in the full file.
 */
8412 /* Test the read-only and read/write bits. */
8413 if (((val & read_mask) != read_val) || (val & write_mask))
8416 /* Write ones to all the bits defined by RdMask and WrMask, then
8417 * make sure the read-only bits are not changed and the
8418 * read/write bits are all ones.
8420 tw32(offset, read_mask | write_mask);
8424 /* Test the read-only bits. */
8425 if ((val & read_mask) != read_val)
8428 /* Test the read/write bits. */
8429 if ((val & write_mask) != write_mask)
8432 tw32(offset, save_val);
/* Failure path (reached via elided 'goto out'): log and restore. */
8438 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8439 tw32(offset, save_val);
8443 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8445 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8449 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8450 for (j = 0; j < len; j += 4) {
8453 tg3_write_mem(tp, offset + j, test_pattern[i]);
8454 tg3_read_mem(tp, offset + j, &val);
8455 if (val != test_pattern[i])
/* Self-test: exercise the chip-family-specific list of internal memory
 * regions with tg3_do_mem_test.  Each table is terminated by an entry
 * with offset 0xffffffff.  NOTE(review): the mem_entry field
 * declarations, 'else' keywords, 'int err, i' and the final return
 * are elided in this excerpt.
 */
8462 static int tg3_test_memory(struct tg3 *tp)
8464 static struct mem_entry {
8467 } mem_tbl_570x[] = {
8468 { 0x00000000, 0x00b50},
8469 { 0x00002000, 0x1c000},
8470 { 0xffffffff, 0x00000}
8471 }, mem_tbl_5705[] = {
8472 { 0x00000100, 0x0000c},
8473 { 0x00000200, 0x00008},
8474 { 0x00004000, 0x00800},
8475 { 0x00006000, 0x01000},
8476 { 0x00008000, 0x02000},
8477 { 0x00010000, 0x0e000},
8478 { 0xffffffff, 0x00000}
8479 }, mem_tbl_5755[] = {
8480 { 0x00000200, 0x00008},
8481 { 0x00004000, 0x00800},
8482 { 0x00006000, 0x00800},
8483 { 0x00008000, 0x02000},
8484 { 0x00010000, 0x0c000},
8485 { 0xffffffff, 0x00000}
8487 struct mem_entry *mem_tbl;
/* Pick the region table matching the ASIC family. */
8491 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8494 mem_tbl = mem_tbl_5755;
8496 mem_tbl = mem_tbl_5705;
8498 mem_tbl = mem_tbl_570x;
8500 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8501 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8502 mem_tbl[i].len)) != 0)
8509 #define TG3_MAC_LOOPBACK 0
8510 #define TG3_PHY_LOOPBACK 1
/* Self-test: send one self-addressed test frame through either MAC
 * internal loopback or PHY loopback, then verify that exactly one
 * frame comes back on the standard rx ring with an intact payload.
 * Returns 0 on success, nonzero on failure.  NOTE(review): numerous
 * lines are elided in this excerpt: err/tx_len initialization, udelays,
 * tx_prod increments, error 'goto's and the final return.
 */
8512 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8514 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8516 struct sk_buff *skb, *rx_skb;
8519 int num_pkts, tx_len, rx_len, i, err;
8520 struct tg3_rx_buffer_desc *desc;
8522 if (loopback_mode == TG3_MAC_LOOPBACK) {
8523 /* HW errata - mac loopback fails in some cases on 5780.
8524 * Normal traffic and PHY loopback are not affected by
8527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8530 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8531 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8532 MAC_MODE_PORT_MODE_GMII;
8533 tw32(MAC_MODE, mac_mode);
8534 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8535 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8538 /* reset to prevent losing 1st rx packet intermittently */
8539 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8540 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8542 tw32_f(MAC_RX_MODE, tp->rx_mode);
8544 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8545 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8546 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8547 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8548 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8549 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8551 tw32(MAC_MODE, mac_mode);
8559 skb = dev_alloc_skb(tx_len);
/* Build the frame: dest = our own MAC, zero src/type, counted payload. */
8563 tx_data = skb_put(skb, tx_len);
8564 memcpy(tx_data, tp->dev->dev_addr, 6);
8565 memset(tx_data + 6, 0x0, 8);
8567 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8569 for (i = 14; i < tx_len; i++)
8570 tx_data[i] = (u8) (i & 0xff);
8572 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8574 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Remember where the rx producer was so we can detect our frame. */
8579 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8583 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8588 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8590 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
/* Poll for tx completion and rx arrival for a bounded time. */
8594 for (i = 0; i < 10; i++) {
8595 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8600 tx_idx = tp->hw_status->idx[0].tx_consumer;
8601 rx_idx = tp->hw_status->idx[0].rx_producer;
8602 if ((tx_idx == tp->tx_prod) &&
8603 (rx_idx == (rx_start_idx + num_pkts)))
8607 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8610 if (tx_idx != tp->tx_prod)
8613 if (rx_idx != rx_start_idx + num_pkts)
8616 desc = &tp->rx_rcb[rx_start_idx];
8617 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8618 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
/* The frame must have landed on the standard rx ring. */
8619 if (opaque_key != RXD_OPAQUE_RING_STD)
8622 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8623 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
/* Received length excludes the 4-byte FCS appended by the MAC. */
8626 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8627 if (rx_len != tx_len)
8630 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8632 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8633 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
/* Verify the counted payload byte-for-byte. */
8635 for (i = 14; i < tx_len; i++) {
8636 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8641 /* tg3_free_rings will unmap and free the rx_skb */
8646 #define TG3_MAC_LOOPBACK_FAILED 1
8647 #define TG3_PHY_LOOPBACK_FAILED 2
8648 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8649 TG3_PHY_LOOPBACK_FAILED)
8651 static int tg3_test_loopback(struct tg3 *tp)
8655 if (!netif_running(tp->dev))
8656 return TG3_LOOPBACK_FAILED;
8658 tg3_reset_hw(tp, 1);
8660 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8661 err |= TG3_MAC_LOOPBACK_FAILED;
8662 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8663 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8664 err |= TG3_PHY_LOOPBACK_FAILED;
/* ethtool self_test callback: run the NVRAM, link and (offline only)
 * register, memory, loopback and interrupt tests, recording per-test
 * results in data[] and setting ETH_TEST_FL_FAILED on any failure.
 * The offline path halts the chip and restarts it afterwards.
 * NOTE(review): the 'u64 *data' parameter line, data[] assignments for
 * several tests, irq_sync setup and some closing braces are elided in
 * this excerpt.
 */
8670 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8673 struct tg3 *tp = netdev_priv(dev);
/* Power the device up for the duration of the tests if needed. */
8675 if (tp->link_config.phy_is_low_power)
8676 tg3_set_power_state(tp, PCI_D0);
8678 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8680 if (tg3_test_nvram(tp) != 0) {
8681 etest->flags |= ETH_TEST_FL_FAILED;
8684 if (tg3_test_link(tp) != 0) {
8685 etest->flags |= ETH_TEST_FL_FAILED;
8688 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8689 int err, irq_sync = 0;
8691 if (netif_running(dev)) {
8696 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip completely before the destructive tests. */
8698 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8699 err = tg3_nvram_lock(tp);
8700 tg3_halt_cpu(tp, RX_CPU_BASE);
8701 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8702 tg3_halt_cpu(tp, TX_CPU_BASE);
8704 tg3_nvram_unlock(tp);
8706 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8709 if (tg3_test_registers(tp) != 0) {
8710 etest->flags |= ETH_TEST_FL_FAILED;
8713 if (tg3_test_memory(tp) != 0) {
8714 etest->flags |= ETH_TEST_FL_FAILED;
8717 if ((data[4] = tg3_test_loopback(tp)) != 0)
8718 etest->flags |= ETH_TEST_FL_FAILED;
/* Interrupt test must run unlocked — it waits for an irq. */
8720 tg3_full_unlock(tp);
8722 if (tg3_test_interrupt(tp) != 0) {
8723 etest->flags |= ETH_TEST_FL_FAILED;
8727 tg3_full_lock(tp, 0);
8729 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8730 if (netif_running(dev)) {
8731 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8733 tg3_netif_start(tp);
8736 tg3_full_unlock(tp);
/* Return the device to low-power if that is where we found it. */
8738 if (tp->link_config.phy_is_low_power)
8739 tg3_set_power_state(tp, PCI_D3hot);
/* net_device ioctl handler for the MII ioctls (get PHY id, read and
 * write PHY registers).  PHY access is refused on serdes parts and
 * while the PHY is powered down; writes additionally require
 * CAP_NET_ADMIN.  NOTE(review): the switch(cmd)/case labels
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), local err/mii_regval
 * declarations and return statements are elided in this excerpt.
 */
8743 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8745 struct mii_ioctl_data *data = if_mii(ifr);
8746 struct tg3 *tp = netdev_priv(dev);
8751 data->phy_id = PHY_ADDR;
8757 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8758 break; /* We have no PHY */
8760 if (tp->link_config.phy_is_low_power)
/* PHY register reads are serialized against the irq/timer paths. */
8763 spin_lock_bh(&tp->lock);
8764 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8765 spin_unlock_bh(&tp->lock);
8767 data->val_out = mii_regval;
8773 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8774 break; /* We have no PHY */
/* Writing PHY registers is privileged. */
8776 if (!capable(CAP_NET_ADMIN))
8779 if (tp->link_config.phy_is_low_power)
8782 spin_lock_bh(&tp->lock);
8783 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8784 spin_unlock_bh(&tp->lock);
8795 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * chip's VLAN-tag stripping behaviour, quiescing the interface around
 * the update if it is running.  NOTE(review): the tg3_netif_stop call
 * and the 'tp->vlgrp = grp' assignment are elided in this excerpt.
 */
8796 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8798 struct tg3 *tp = netdev_priv(dev);
8800 if (netif_running(dev))
8803 tg3_full_lock(tp, 0);
8807 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8808 __tg3_set_rx_mode(dev);
8810 tg3_full_unlock(tp);
8812 if (netif_running(dev))
8813 tg3_netif_start(tp);
/* VLAN acceleration hook: drop the net_device entry for a removed VLAN
 * id, quiescing the interface around the update if it is running.
 * NOTE(review): the tg3_netif_stop call and a null check on tp->vlgrp
 * are elided in this excerpt.
 */
8816 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8818 struct tg3 *tp = netdev_priv(dev);
8820 if (netif_running(dev))
8823 tg3_full_lock(tp, 0);
8825 tp->vlgrp->vlan_devices[vid] = NULL;
8826 tg3_full_unlock(tp);
8828 if (netif_running(dev))
8829 tg3_netif_start(tp);
8833 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8835 struct tg3 *tp = netdev_priv(dev);
8837 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool set_coalesce callback: range-check the requested interrupt
 * coalescing parameters against per-family hardware limits, copy the
 * supported subset into tp->coal, and push them to the chip if it is
 * running.  NOTE(review): the -EINVAL returns after the checks and
 * the final 'return 0' are elided in this excerpt.
 */
8841 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8843 struct tg3 *tp = netdev_priv(dev);
/* Limits default to 0 (feature unsupported) on 5705+ chips. */
8844 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8845 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8847 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8848 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8849 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8850 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8851 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8854 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8855 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8856 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8857 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8858 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8859 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8860 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8861 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8862 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8863 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8866 /* No rx interrupts will be generated if both are zero */
8867 if ((ec->rx_coalesce_usecs == 0) &&
8868 (ec->rx_max_coalesced_frames == 0))
8871 /* No tx interrupts will be generated if both are zero */
8872 if ((ec->tx_coalesce_usecs == 0) &&
8873 (ec->tx_max_coalesced_frames == 0))
8876 /* Only copy relevant parameters, ignore all others. */
8877 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8878 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8879 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8880 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8881 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8882 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8883 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8884 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8885 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Apply immediately when the device is up. */
8887 if (netif_running(dev)) {
8888 tg3_full_lock(tp, 0);
8889 __tg3_set_coalesce(tp, &tp->coal);
8890 tg3_full_unlock(tp);
/* ethtool operations table wiring the callbacks above into the
 * net_device.  TSO entries are compiled in only when TG3_TSO_SUPPORT
 * is enabled.  NOTE(review): the closing '#endif' for the TSO section
 * and the terminating '};' are elided in this excerpt.
 */
8895 static struct ethtool_ops tg3_ethtool_ops = {
8896 .get_settings = tg3_get_settings,
8897 .set_settings = tg3_set_settings,
8898 .get_drvinfo = tg3_get_drvinfo,
8899 .get_regs_len = tg3_get_regs_len,
8900 .get_regs = tg3_get_regs,
8901 .get_wol = tg3_get_wol,
8902 .set_wol = tg3_set_wol,
8903 .get_msglevel = tg3_get_msglevel,
8904 .set_msglevel = tg3_set_msglevel,
8905 .nway_reset = tg3_nway_reset,
8906 .get_link = ethtool_op_get_link,
8907 .get_eeprom_len = tg3_get_eeprom_len,
8908 .get_eeprom = tg3_get_eeprom,
8909 .set_eeprom = tg3_set_eeprom,
8910 .get_ringparam = tg3_get_ringparam,
8911 .set_ringparam = tg3_set_ringparam,
8912 .get_pauseparam = tg3_get_pauseparam,
8913 .set_pauseparam = tg3_set_pauseparam,
8914 .get_rx_csum = tg3_get_rx_csum,
8915 .set_rx_csum = tg3_set_rx_csum,
8916 .get_tx_csum = ethtool_op_get_tx_csum,
8917 .set_tx_csum = tg3_set_tx_csum,
8918 .get_sg = ethtool_op_get_sg,
8919 .set_sg = ethtool_op_set_sg,
8920 #if TG3_TSO_SUPPORT != 0
8921 .get_tso = ethtool_op_get_tso,
8922 .set_tso = tg3_set_tso,
8924 .self_test_count = tg3_get_test_count,
8925 .self_test = tg3_self_test,
8926 .get_strings = tg3_get_strings,
8927 .phys_id = tg3_phys_id,
8928 .get_stats_count = tg3_get_stats_count,
8929 .get_ethtool_stats = tg3_get_ethtool_stats,
8930 .get_coalesce = tg3_get_coalesce,
8931 .set_coalesce = tg3_set_coalesce,
8932 .get_perm_addr = ethtool_op_get_perm_addr,
/* Probe-time helper: determine the EEPROM size by probing at doubling
 * offsets until the magic signature reappears, which means addressing
 * wrapped around.  NOTE(review): the initial cursize value, the
 * signature comparison and cursize doubling inside the loop, and the
 * early 'return's are elided in this excerpt.
 */
8935 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8937 u32 cursize, val, magic;
/* Assume the maximum supported EEPROM size until proven smaller. */
8939 tp->nvram_size = EEPROM_CHIP_SIZE;
8941 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8944 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8948 * Size the chip by reading offsets at increasing powers of two.
8949 * When we encounter our validation signature, we know the addressing
8950 * has wrapped around, and thus have our chip size.
8954 while (cursize < tp->nvram_size) {
8955 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8964 tp->nvram_size = cursize;
/* Probe-time helper: determine the NVRAM size.  Self-boot images fall
 * back to the EEPROM sizing probe; standard images read the size (in
 * KB) from the directory word at offset 0xf0, defaulting to 128KB when
 * that word is zero or unreadable.  NOTE(review): the 'u32 val'
 * declaration, a 'val != 0' check and early returns are elided here.
 */
8967 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8971 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8974 /* Selfboot format */
8975 if (val != TG3_EEPROM_MAGIC) {
8976 tg3_get_eeprom_size(tp);
8980 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
/* Upper 16 bits hold the NVRAM size in kilobytes. */
8982 tp->nvram_size = (val >> 16) * 1024;
8986 tp->nvram_size = 0x20000;
/* Probe-time helper: decode NVRAM_CFG1 to identify the flash/EEPROM
 * vendor, page size and buffering mode for pre-5752 chips, recording
 * the results in tp->nvram_jedecnum/nvram_pagesize and the NVRAM
 * flags.  NOTE(review): 'u32 nvcfg1', 'break' statements, the 'else'
 * keywords and closing braces are elided in this excerpt.
 */
8989 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8993 nvcfg1 = tr32(NVRAM_CFG1);
8994 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8995 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* No flash interface: force non-bypass (EEPROM) access mode. */
8998 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8999 tw32(NVRAM_CFG1, nvcfg1);
/* Only 5750 and the 5780 class encode a vendor field to decode. */
9002 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9003 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9004 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9005 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9006 tp->nvram_jedecnum = JEDEC_ATMEL;
9007 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9008 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9010 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9011 tp->nvram_jedecnum = JEDEC_ATMEL;
9012 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9014 case FLASH_VENDOR_ATMEL_EEPROM:
9015 tp->nvram_jedecnum = JEDEC_ATMEL;
9016 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9017 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9019 case FLASH_VENDOR_ST:
9020 tp->nvram_jedecnum = JEDEC_ST;
9021 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9022 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9024 case FLASH_VENDOR_SAIFUN:
9025 tp->nvram_jedecnum = JEDEC_SAIFUN;
9026 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9028 case FLASH_VENDOR_SST_SMALL:
9029 case FLASH_VENDOR_SST_LARGE:
9030 tp->nvram_jedecnum = JEDEC_SST;
9031 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Default (older chips): buffered Atmel AT45DB0X1B flash. */
9036 tp->nvram_jedecnum = JEDEC_ATMEL;
9037 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9038 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* Probe-time helper (5752): decode NVRAM_CFG1 vendor and page-size
 * fields, set the TPM protection flag, and record vendor/page-size/
 * buffering state.  NOTE(review): 'u32 nvcfg1', 'break' statements,
 * the switch/else structure around the page-size decode and closing
 * braces are elided in this excerpt.
 */
9042 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9046 nvcfg1 = tr32(NVRAM_CFG1);
9048 /* NVRAM protection for TPM */
9049 if (nvcfg1 & (1 << 27))
9050 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9052 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9053 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9054 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9055 tp->nvram_jedecnum = JEDEC_ATMEL;
9056 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9058 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9059 tp->nvram_jedecnum = JEDEC_ATMEL;
9060 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9061 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9063 case FLASH_5752VENDOR_ST_M45PE10:
9064 case FLASH_5752VENDOR_ST_M45PE20:
9065 case FLASH_5752VENDOR_ST_M45PE40:
9066 tp->nvram_jedecnum = JEDEC_ST;
9067 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9068 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* Flash parts encode their page size in a separate CFG1 field. */
9072 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9073 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9074 case FLASH_5752PAGE_SIZE_256:
9075 tp->nvram_pagesize = 256;
9077 case FLASH_5752PAGE_SIZE_512:
9078 tp->nvram_pagesize = 512;
9080 case FLASH_5752PAGE_SIZE_1K:
9081 tp->nvram_pagesize = 1024;
9083 case FLASH_5752PAGE_SIZE_2K:
9084 tp->nvram_pagesize = 2048;
9086 case FLASH_5752PAGE_SIZE_4K:
9087 tp->nvram_pagesize = 4096;
9089 case FLASH_5752PAGE_SIZE_264:
9090 tp->nvram_pagesize = 264;
9095 /* For eeprom, set pagesize to maximum eeprom size */
9096 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9098 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9099 tw32(NVRAM_CFG1, nvcfg1);
/* Decode NVRAM_CFG1 on 5755 chips.  Same structure as the 5752 variant:
 * TPM protection bit, then per-vendor JEDEC id / buffered / flash flags
 * and a fixed page size per part family.
 * NOTE(review): extraction appears truncated (missing breaks/braces) --
 * verify against the original tg3.c.
 */
9103 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9107 nvcfg1 = tr32(NVRAM_CFG1);
9109 /* NVRAM protection for TPM */
9110 if (nvcfg1 & (1 << 27))
9111 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9113 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9114 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9115 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9116 tp->nvram_jedecnum = JEDEC_ATMEL;
9117 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/* EEPROM case: page size is the whole chip; compat bypass is cleared. */
9118 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9120 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9121 tw32(NVRAM_CFG1, nvcfg1);
9123 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9124 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9125 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9126 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9127 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9128 tp->nvram_jedecnum = JEDEC_ATMEL;
9129 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9130 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9131 tp->nvram_pagesize = 264;
9133 case FLASH_5752VENDOR_ST_M45PE10:
9134 case FLASH_5752VENDOR_ST_M45PE20:
9135 case FLASH_5752VENDOR_ST_M45PE40:
9136 tp->nvram_jedecnum = JEDEC_ST;
9137 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9138 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9139 tp->nvram_pagesize = 256;
/* Decode NVRAM_CFG1 on 5787 chips.  Mirrors tg3_get_5755_nvram_info()
 * but with the 5787 EEPROM vendor ids and no TPM-protection check in
 * the visible lines.
 * NOTE(review): extraction appears truncated (missing breaks/braces) --
 * verify against the original tg3.c.
 */
9144 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9148 nvcfg1 = tr32(NVRAM_CFG1);
9150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9151 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9152 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9153 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9154 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9155 tp->nvram_jedecnum = JEDEC_ATMEL;
9156 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9157 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9159 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9160 tw32(NVRAM_CFG1, nvcfg1);
9162 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9163 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9164 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9165 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9166 tp->nvram_jedecnum = JEDEC_ATMEL;
9167 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9168 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9169 tp->nvram_pagesize = 264;
9171 case FLASH_5752VENDOR_ST_M45PE10:
9172 case FLASH_5752VENDOR_ST_M45PE20:
9173 case FLASH_5752VENDOR_ST_M45PE40:
9174 tp->nvram_jedecnum = JEDEC_ST;
9175 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9176 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9177 tp->nvram_pagesize = 256;
9182 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* One-time NVRAM setup at probe: reset the EEPROM state machine, enable
 * seeprom access, then (on non-5700/5701 parts) take the NVRAM lock and
 * dispatch to the chip-specific *_nvram_info() decoder before reading
 * the NVRAM size.  5700/5701 fall back to plain EEPROM sizing.
 * NOTE(review): extraction appears truncated (missing udelay body,
 * returns, else arms, braces) -- verify against the original tg3.c.
 * NOTE(review): "nvarm" in the warning string is a typo in the original;
 * left untouched here since it is runtime output.
 */
9183 static void __devinit tg3_nvram_init(struct tg3 *tp)
9187 tw32_f(GRC_EEPROM_ADDR,
9188 (EEPROM_ADDR_FSM_RESET |
9189 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9190 EEPROM_ADDR_CLKPERD_SHIFT)));
9192 /* XXX schedule_timeout() ... */
9193 for (j = 0; j < 100; j++)
9196 /* Enable seeprom accesses. */
9197 tw32_f(GRC_LOCAL_CTRL,
9198 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9201 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9202 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9203 tp->tg3_flags |= TG3_FLAG_NVRAM;
9205 if (tg3_nvram_lock(tp)) {
9206 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9207 "tg3_nvram_init failed.\n", tp->dev->name);
9210 tg3_enable_nvram_access(tp);
/* Chip-specific NVRAM_CFG1 decoding. */
9212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9213 tg3_get_5752_nvram_info(tp);
9214 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9215 tg3_get_5755_nvram_info(tp);
9216 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9217 tg3_get_5787_nvram_info(tp);
9219 tg3_get_nvram_info(tp);
9221 tg3_get_nvram_size(tp);
9223 tg3_disable_nvram_access(tp);
9224 tg3_nvram_unlock(tp);
9227 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9229 tg3_get_eeprom_size(tp);
/* Read one 32-bit word from the legacy serial EEPROM via GRC_EEPROM_ADDR/
 * GRC_EEPROM_DATA: program the address with READ|START, poll (up to
 * 10000 iterations) for EEPROM_ADDR_COMPLETE, then latch the data.
 * NOTE(review): extraction appears truncated (missing returns/udelay/
 * braces) -- verify against the original tg3.c.
 */
9233 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9234 u32 offset, u32 *val)
9239 if (offset > EEPROM_ADDR_ADDR_MASK ||
9243 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9244 EEPROM_ADDR_DEVID_MASK |
9246 tw32(GRC_EEPROM_ADDR,
9248 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9249 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9250 EEPROM_ADDR_ADDR_MASK) |
9251 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Busy-wait for the EEPROM state machine to finish. */
9253 for (i = 0; i < 10000; i++) {
9254 tmp = tr32(GRC_EEPROM_ADDR);
9256 if (tmp & EEPROM_ADDR_COMPLETE)
9260 if (!(tmp & EEPROM_ADDR_COMPLETE))
9263 *val = tr32(GRC_EEPROM_DATA);
9267 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll (up to NVRAM_CMD_TIMEOUT iterations)
 * for NVRAM_CMD_DONE; a full timeout is the failure path.
 * NOTE(review): extraction appears truncated (loop body / returns
 * missing) -- verify against the original tg3.c.
 */
9269 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9273 tw32(NVRAM_CMD, nvram_cmd);
9274 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9276 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9281 if (i == NVRAM_CMD_TIMEOUT) {
/* Map a logical byte offset to the device address used by buffered
 * Atmel AT45DB0x1B flash: the page index is shifted into the page-position
 * field and the intra-page offset is kept in the low bits.  All other
 * NVRAM configurations presumably pass the address through unchanged
 * (return path not visible in this extraction -- verify).
 */
9287 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9289 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9290 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9291 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9292 (tp->nvram_jedecnum == JEDEC_ATMEL))
9294 addr = ((addr / tp->nvram_pagesize) <<
9295 ATMEL_AT45DB0X1B_PAGE_POS) +
9296 (addr % tp->nvram_pagesize);
9301 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9303 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9304 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9305 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9306 (tp->nvram_jedecnum == JEDEC_ATMEL))
9308 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9309 tp->nvram_pagesize) +
9310 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
/* Read one 32-bit word from NVRAM.  Falls back to the legacy EEPROM path
 * when TG3_FLAG_NVRAM is clear; otherwise translates the offset, takes
 * the NVRAM lock, enables access, executes a single READ command and
 * byte-swaps the result from NVRAM_RDDATA.
 * NOTE(review): extraction appears truncated (error returns/braces
 * missing) -- verify against the original tg3.c.
 */
9315 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9319 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9320 return tg3_nvram_read_using_eeprom(tp, offset, val);
9322 offset = tg3_nvram_phys_addr(tp, offset);
9324 if (offset > NVRAM_ADDR_MSK)
9327 ret = tg3_nvram_lock(tp);
9331 tg3_enable_nvram_access(tp);
9333 tw32(NVRAM_ADDR, offset);
9334 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9335 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
/* NVRAM data is big-endian in the register; swab to host/LE order. */
9338 *val = swab32(tr32(NVRAM_RDDATA));
9340 tg3_disable_nvram_access(tp);
9342 tg3_nvram_unlock(tp);
/* Byte-swapped wrapper around tg3_nvram_read().
 * NOTE(review): body is truncated in this extraction (swab/assignment
 * and return lines missing) -- verify against the original tg3.c.
 */
9347 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9352 err = tg3_nvram_read(tp, offset, &tmp);
/* Write a dword-aligned buffer to the legacy serial EEPROM, one 32-bit
 * word at a time: load GRC_EEPROM_DATA, ack any pending COMPLETE, program
 * the address with WRITE|START, then poll (10000 iterations) for
 * EEPROM_ADDR_COMPLETE.
 * NOTE(review): extraction appears truncated (addr computation, START
 * bits, udelay, returns) -- verify against the original tg3.c.
 */
9357 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9358 u32 offset, u32 len, u8 *buf)
9363 for (i = 0; i < len; i += 4) {
9368 memcpy(&data, buf + i, 4);
9370 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9372 val = tr32(GRC_EEPROM_ADDR);
/* Writing COMPLETE back clears the previous completion status. */
9373 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9375 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9377 tw32(GRC_EEPROM_ADDR, val |
9378 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9379 (addr & EEPROM_ADDR_ADDR_MASK) |
9383 for (j = 0; j < 10000; j++) {
9384 val = tr32(GRC_EEPROM_ADDR);
9386 if (val & EEPROM_ADDR_COMPLETE)
9390 if (!(val & EEPROM_ADDR_COMPLETE)) {
9399 /* offset and length are dword aligned */
/* Read-modify-write path for unbuffered flash: for each page touched,
 * read the whole page into a kmalloc'd bounce buffer, splice in the new
 * bytes, issue WREN + page ERASE, WREN again, then stream the page back
 * word-by-word (FIRST on the first word, LAST on the final word), and
 * finish with a WRDI (write-disable) command.
 * NOTE(review): extraction appears truncated (outer while loop, size
 * computation, error paths, kfree) -- verify against the original tg3.c.
 */
9400 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9404 u32 pagesize = tp->nvram_pagesize;
9405 u32 pagemask = pagesize - 1;
9409 tmp = kmalloc(pagesize, GFP_KERNEL);
9415 u32 phy_addr, page_off, size;
9417 phy_addr = offset & ~pagemask;
/* Pull the current page contents so unmodified bytes survive the erase. */
9419 for (j = 0; j < pagesize; j += 4) {
9420 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9421 (u32 *) (tmp + j))))
9427 page_off = offset & pagemask;
9434 memcpy(tmp + page_off, buf, size);
9436 offset = offset + (pagesize - page_off);
9438 tg3_enable_nvram_access(tp);
9441 * Before we can erase the flash page, we need
9442 * to issue a special "write enable" command.
9444 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9446 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9449 /* Erase the target page */
9450 tw32(NVRAM_ADDR, phy_addr);
9452 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9453 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9455 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9458 /* Issue another write enable to start the write. */
9459 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9461 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Write the merged page back one big-endian word at a time. */
9464 for (j = 0; j < pagesize; j += 4) {
9467 data = *((u32 *) (tmp + j));
9468 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9470 tw32(NVRAM_ADDR, phy_addr + j);
9472 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9476 nvram_cmd |= NVRAM_CMD_FIRST;
9477 else if (j == (pagesize - 4))
9478 nvram_cmd |= NVRAM_CMD_LAST;
9480 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9487 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9488 tg3_nvram_exec_cmd(tp, nvram_cmd);
9495 /* offset and length are dword aligned */
/* Write path for buffered flash / EEPROM: stream each word with FIRST
 * set at a page boundary (or on the first word) and LAST at the end of
 * a page.  Older ST parts (pre-5752/5755/5787) need an explicit WREN
 * before each FIRST; plain EEPROMs always get FIRST|LAST per word.
 * NOTE(review): extraction appears truncated (i == len-4 LAST check,
 * returns, braces) -- verify against the original tg3.c.
 */
9496 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9501 for (i = 0; i < len; i += 4, offset += 4) {
9502 u32 data, page_off, phy_addr, nvram_cmd;
9504 memcpy(&data, buf + i, 4);
9505 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9507 page_off = offset % tp->nvram_pagesize;
9509 phy_addr = tg3_nvram_phys_addr(tp, offset);
9511 tw32(NVRAM_ADDR, phy_addr);
9513 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9515 if ((page_off == 0) || (i == 0))
9516 nvram_cmd |= NVRAM_CMD_FIRST;
9517 if (page_off == (tp->nvram_pagesize - 4))
9518 nvram_cmd |= NVRAM_CMD_LAST;
9521 nvram_cmd |= NVRAM_CMD_LAST;
/* Pre-5752 ST flash needs a write-enable before starting a burst. */
9523 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9524 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9525 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9526 (tp->nvram_jedecnum == JEDEC_ST) &&
9527 (nvram_cmd & NVRAM_CMD_FIRST)) {
9529 if ((ret = tg3_nvram_exec_cmd(tp,
9530 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9535 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9536 /* We always do complete word writes to eeprom. */
9537 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9540 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9546 /* offset and length are dword aligned */
/* Top-level NVRAM write dispatcher: temporarily drop the write-protect
 * GPIO if EEPROM_WRITE_PROT is set, route to the eeprom / buffered /
 * unbuffered writer, and bracket the flash paths with the NVRAM lock,
 * access enable and GRC_MODE write-enable.  Restores write protection
 * (grc_local_ctrl) on the way out.
 * NOTE(review): extraction appears truncated (else arms, udelay, return
 * of ret) -- verify against the original tg3.c.
 */
9547 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9551 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9552 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9553 ~GRC_LCLCTRL_GPIO_OUTPUT1);
9557 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9558 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9563 ret = tg3_nvram_lock(tp);
9567 tg3_enable_nvram_access(tp);
/* 5750+ parts without protected NVRAM take an extra unlock write. */
9568 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9569 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9570 tw32(NVRAM_WRITE1, 0x406);
9572 grc_mode = tr32(GRC_MODE);
9573 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9575 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9576 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9578 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9582 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9586 grc_mode = tr32(GRC_MODE);
9587 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9589 tg3_disable_nvram_access(tp);
9590 tg3_nvram_unlock(tp);
9593 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9594 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* One row of the subsystem-id -> PHY-id lookup table used when the
 * eeprom carries no usable PHY id.
 * NOTE(review): the phy_id member and closing brace are missing from
 * this extraction -- verify against the original tg3.c.
 */
9601 struct subsys_tbl_ent {
9602 u16 subsys_vendor, subsys_devid;
/* Hard-coded board table: maps PCI subsystem vendor/device ids to the
 * PHY fitted on known Broadcom/3Com/Dell/Compaq/IBM boards.  A phy_id
 * of 0 marks serdes (fiber) boards with no copper PHY.  Consulted by
 * lookup_by_subsys() as a fallback when the eeprom signature is absent.
 */
9606 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9607 /* Broadcom boards. */
9608 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9609 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9610 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9611 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9612 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9613 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9614 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9615 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9616 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9617 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9618 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9621 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9622 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9623 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9624 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9625 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9628 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9629 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9630 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9631 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9633 /* Compaq boards. */
9634 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9635 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9636 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9637 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9638 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9641 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Linear search of subsys_id_to_phy_id[] for this device's PCI subsystem
 * vendor/device pair; returns the matching row, or (per the fall-through
 * path, not visible here) NULL when no row matches -- verify.
 */
9644 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9648 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9649 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9650 tp->pdev->subsystem_vendor) &&
9651 (subsys_id_to_phy_id[i].subsys_devid ==
9652 tp->pdev->subsystem_device))
9653 return &subsys_id_to_phy_id[i];
/* Parse the NIC_SRAM_DATA_* configuration area left by bootcode: force
 * the chip into D0 and enable the memory arbiter so SRAM is readable,
 * then, when the SRAM signature matches, extract the PHY id, serdes
 * flag, LED mode, write-protect, ASF/WoL and serdes tuning flags into
 * tp->phy_id / tp->led_ctrl / tp->tg3_flags*.
 * NOTE(review): extraction appears truncated (braces, breaks, udelay,
 * some switch headers) -- verify against the original tg3.c.
 */
9658 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9663 /* On some early chips the SRAM cannot be accessed in D3hot state,
9664 * so need make sure we're in D0.
9666 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9667 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9668 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9671 /* Make sure register accesses (indirect or otherwise)
9672 * will function correctly.
9674 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9675 tp->misc_host_ctrl);
9677 /* The memory arbiter has to be enabled in order for SRAM accesses
9678 * to succeed. Normally on powerup the tg3 chip firmware will make
9679 * sure it is enabled, but other entities such as system netboot
9680 * code might disable it.
9682 val = tr32(MEMARB_MODE);
9683 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9685 tp->phy_id = PHY_ID_INVALID;
9686 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9688 /* Assume an onboard device by default. */
9689 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9691 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9692 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9693 u32 nic_cfg, led_cfg;
9694 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9695 int eeprom_phy_serdes = 0;
9697 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9698 tp->nic_sram_data_cfg = nic_cfg;
9700 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9701 ver >>= NIC_SRAM_DATA_VER_SHIFT;
/* CFG_2 word only exists on newer bootcode (not 5700/5701/5703, ver 1..0xff). */
9702 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9703 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9704 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9705 (ver > 0) && (ver < 0x100))
9706 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9708 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9709 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9710 eeprom_phy_serdes = 1;
9712 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9713 if (nic_phy_id != 0) {
9714 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9715 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
/* Re-pack the two SRAM id halves into the driver's PHY id format. */
9717 eeprom_phy_id = (id1 >> 16) << 10;
9718 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9719 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9723 tp->phy_id = eeprom_phy_id;
9724 if (eeprom_phy_serdes) {
9725 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9726 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9728 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9731 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9732 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9733 SHASTA_EXT_LED_MODE_MASK);
9735 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9739 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9740 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9743 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9744 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9747 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9748 tp->led_ctrl = LED_CTRL_MODE_MAC;
9750 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9751 * read on some older 5700/5701 bootcode.
9753 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9755 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9757 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9761 case SHASTA_EXT_LED_SHARED:
9762 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9763 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9764 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9765 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9766 LED_CTRL_MODE_PHY_2);
9769 case SHASTA_EXT_LED_MAC:
9770 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9773 case SHASTA_EXT_LED_COMBO:
9774 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9775 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9776 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9777 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards always use PHY_2 LED mode. */
9782 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9784 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9785 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9787 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9788 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9790 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9792 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9793 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9794 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9795 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9797 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9798 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9800 if (cfg2 & (1 << 17))
9801 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9803 /* serdes signal pre-emphasis in register 0x590 set by */
9804 /* bootcode if bit 18 is set */
9805 if (cfg2 & (1 << 18))
9806 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
/* Probe and identify the PHY.  When ASF firmware owns the PHY the MDIO
 * read is skipped (id forced to PHY_ID_INVALID); otherwise the id is
 * assembled from MII_PHYSID1/2.  Falls back to the eeprom-derived id,
 * then the hard-coded subsystem table.  For copper PHYs (non-serdes,
 * no ASF) it resets the PHY, programs autoneg advertisement, and runs
 * the 5401 DSP init where needed; serdes boards get a fixed advertising
 * mask.
 * NOTE(review): extraction appears truncated (braces, else arms, the
 * serdes/10_100 interleaving near the end) -- verify against the
 * original tg3.c.
 */
9810 static int __devinit tg3_phy_probe(struct tg3 *tp)
9812 u32 hw_phy_id_1, hw_phy_id_2;
9813 u32 hw_phy_id, hw_phy_id_masked;
9816 /* Reading the PHY ID register can conflict with ASF
9817 * firwmare access to the PHY hardware.
9820 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9821 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9823 /* Now read the physical PHY_ID from the chip and verify
9824 * that it is sane. If it doesn't look good, we fall back
9825 * to either the hard-coded table based PHY_ID and failing
9826 * that the value found in the eeprom area.
9828 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9829 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack PHYSID1/2 into the driver's internal PHY id layout. */
9831 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9832 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9833 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9835 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9838 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9839 tp->phy_id = hw_phy_id;
9840 if (hw_phy_id_masked == PHY_ID_BCM8002)
9841 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9843 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9845 if (tp->phy_id != PHY_ID_INVALID) {
9846 /* Do nothing, phy ID already set up in
9847 * tg3_get_eeprom_hw_cfg().
9850 struct subsys_tbl_ent *p;
9852 /* No eeprom signature? Try the hardcoded
9853 * subsys device table.
9855 p = lookup_by_subsys(tp);
9859 tp->phy_id = p->phy_id;
9861 tp->phy_id == PHY_ID_BCM8002)
9862 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY without ASF: make sure autoneg advertises everything. */
9866 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9867 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9868 u32 bmsr, adv_reg, tg3_ctrl;
/* BMSR is read twice; link-status bit is latched-low per 802.3. */
9870 tg3_readphy(tp, MII_BMSR, &bmsr);
9871 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9872 (bmsr & BMSR_LSTATUS))
9873 goto skip_phy_reset;
9875 err = tg3_phy_reset(tp);
9879 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9880 ADVERTISE_100HALF | ADVERTISE_100FULL |
9881 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9883 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9884 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9885 MII_TG3_CTRL_ADV_1000_FULL);
/* 5701 A0/B0 must be forced to master mode. */
9886 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9887 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9888 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9889 MII_TG3_CTRL_ENABLE_AS_MASTER);
9892 if (!tg3_copper_is_advertising_all(tp)) {
9893 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9895 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9896 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9898 tg3_writephy(tp, MII_BMCR,
9899 BMCR_ANENABLE | BMCR_ANRESTART);
9901 tg3_phy_set_wirespeed(tp);
9903 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9904 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9905 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9909 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9910 err = tg3_init_5401phy_dsp(tp);
9915 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9916 err = tg3_init_5401phy_dsp(tp);
9919 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9920 tp->link_config.advertising =
9921 (ADVERTISED_1000baseT_Half |
9922 ADVERTISED_1000baseT_Full |
9923 ADVERTISED_Autoneg |
9925 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9926 tp->link_config.advertising &=
9927 ~(ADVERTISED_1000baseT_Half |
9928 ADVERTISED_1000baseT_Full);
/* Fetch the 256-byte VPD area -- from NVRAM at 0x100 when the eeprom
 * magic matches, otherwise via the PCI VPD capability -- then walk the
 * VPD structure for the read-only block's "PN" keyword and copy the
 * part number (max 24 bytes) into tp->board_part_number; on any failure
 * it is set to "none".
 * NOTE(review): extraction appears truncated (goto targets, index
 * advances, loop braces) -- verify against the original tg3.c.
 */
9933 static void __devinit tg3_read_partno(struct tg3 *tp)
9935 unsigned char vpd_data[256];
9939 if (tg3_nvram_read_swab(tp, 0x0, &magic))
9942 if (magic == TG3_EEPROM_MAGIC) {
9943 for (i = 0; i < 256; i += 4) {
9946 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
/* Unpack the little-endian word into consecutive bytes. */
9949 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9950 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9951 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9952 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
/* No NVRAM image: read the VPD through PCI config space instead. */
9957 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9958 for (i = 0; i < 256; i += 4) {
9962 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9965 pci_read_config_word(tp->pdev, vpd_cap +
9966 PCI_VPD_ADDR, &tmp16);
/* Bit 15 set signals the VPD dword is ready. */
9971 if (!(tmp16 & 0x8000))
9974 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9976 tmp = cpu_to_le32(tmp);
9977 memcpy(&vpd_data[i], &tmp, 4);
9981 /* Now parse and find the part number. */
9982 for (i = 0; i < 256; ) {
9983 unsigned char val = vpd_data[i];
/* 0x82 = identifier string tag, 0x91 = read-only data tag. */
9986 if (val == 0x82 || val == 0x91) {
9989 (vpd_data[i + 2] << 8)));
9996 block_end = (i + 3 +
9998 (vpd_data[i + 2] << 8)));
10000 while (i < block_end) {
10001 if (vpd_data[i + 0] == 'P' &&
10002 vpd_data[i + 1] == 'N') {
10003 int partno_len = vpd_data[i + 2];
10005 if (partno_len > 24)
10006 goto out_not_found;
10008 memcpy(tp->board_part_number,
10017 /* Part number not found. */
10018 goto out_not_found;
10022 strcpy(tp->board_part_number, "none");
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver: check the eeprom magic, follow the start/offset words at
 * 0x4/0xc, verify the 0x0c000000 signature, then copy up to 16 bytes
 * from the version-string offset.
 * NOTE(review): extraction appears truncated (returns, ver_offset
 * bounds check, braces) -- verify against the original tg3.c.
 */
10025 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10027 u32 val, offset, start;
10029 if (tg3_nvram_read_swab(tp, 0, &val))
10032 if (val != TG3_EEPROM_MAGIC)
10035 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10036 tg3_nvram_read_swab(tp, 0x4, &start))
10039 offset = tg3_nvram_logical_addr(tp, offset);
10040 if (tg3_nvram_read_swab(tp, offset, &val))
/* Top bits 0x0c mark a firmware image header. */
10043 if ((val & 0xfc000000) == 0x0c000000) {
10044 u32 ver_offset, addr;
10047 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10048 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10054 addr = offset + ver_offset - start;
10055 for (i = 0; i < 16; i += 4) {
10056 if (tg3_nvram_read(tp, addr + i, &val))
10059 val = cpu_to_le32(val);
10060 memcpy(tp->fw_ver + i, &val, 4);
10065 static int __devinit tg3_get_invariants(struct tg3 *tp)
10067 static struct pci_device_id write_reorder_chipsets[] = {
10068 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10069 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10070 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10071 PCI_DEVICE_ID_VIA_8385_0) },
10075 u32 cacheline_sz_reg;
10076 u32 pci_state_reg, grc_misc_cfg;
10081 /* Force memory write invalidate off. If we leave it on,
10082 * then on 5700_BX chips we have to enable a workaround.
10083 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10084 * to match the cacheline size. The Broadcom driver have this
10085 * workaround but turns MWI off all the times so never uses
10086 * it. This seems to suggest that the workaround is insufficient.
10088 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10089 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10090 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10092 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10093 * has the register indirect write enable bit set before
10094 * we try to access any of the MMIO registers. It is also
10095 * critical that the PCI-X hw workaround situation is decided
10096 * before that as well.
10098 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10101 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10102 MISC_HOST_CTRL_CHIPREV_SHIFT);
10104 /* Wrong chip ID in 5752 A0. This code can be removed later
10105 * as A0 is not in production.
10107 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10108 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10110 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10111 * we need to disable memory and use config. cycles
10112 * only to access all registers. The 5702/03 chips
10113 * can mistakenly decode the special cycles from the
10114 * ICH chipsets as memory write cycles, causing corruption
10115 * of register and memory space. Only certain ICH bridges
10116 * will drive special cycles with non-zero data during the
10117 * address phase which can fall within the 5703's address
10118 * range. This is not an ICH bug as the PCI spec allows
10119 * non-zero address during special cycles. However, only
10120 * these ICH bridges are known to drive non-zero addresses
10121 * during special cycles.
10123 * Since special cycles do not cross PCI bridges, we only
10124 * enable this workaround if the 5703 is on the secondary
10125 * bus of these ICH bridges.
10127 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10128 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10129 static struct tg3_dev_id {
10133 } ich_chipsets[] = {
10134 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10136 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10138 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10140 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10144 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10145 struct pci_dev *bridge = NULL;
10147 while (pci_id->vendor != 0) {
10148 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10154 if (pci_id->rev != PCI_ANY_ID) {
10157 pci_read_config_byte(bridge, PCI_REVISION_ID,
10159 if (rev > pci_id->rev)
10162 if (bridge->subordinate &&
10163 (bridge->subordinate->number ==
10164 tp->pdev->bus->number)) {
10166 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10167 pci_dev_put(bridge);
10173 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10174 * DMA addresses > 40-bit. This bridge may have other additional
10175 * 57xx devices behind it in some 4-port NIC designs for example.
10176 * Any tg3 device found behind the bridge will also need the 40-bit
10179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10181 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10182 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10183 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10186 struct pci_dev *bridge = NULL;
10189 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10190 PCI_DEVICE_ID_SERVERWORKS_EPB,
10192 if (bridge && bridge->subordinate &&
10193 (bridge->subordinate->number <=
10194 tp->pdev->bus->number) &&
10195 (bridge->subordinate->subordinate >=
10196 tp->pdev->bus->number)) {
10197 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10198 pci_dev_put(bridge);
10204 /* Initialize misc host control in PCI block. */
10205 tp->misc_host_ctrl |= (misc_ctrl_reg &
10206 MISC_HOST_CTRL_CHIPREV);
10207 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10208 tp->misc_host_ctrl);
10210 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10211 &cacheline_sz_reg);
10213 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10214 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10215 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10216 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10222 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10223 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10225 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10226 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10227 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10229 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10231 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10232 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10233 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10235 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10236 TG3_FLG2_HW_TSO_1_BUG;
10237 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10239 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10240 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10244 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10245 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10246 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10247 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10248 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10249 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10251 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10252 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10254 /* If we have an AMD 762 or VIA K8T800 chipset, write
10255 * reordering to the mailbox registers done by the host
10256 * controller can cause major troubles. We read back from
10257 * every mailbox register write to force the writes to be
10258 * posted to the chip in order.
10260 if (pci_dev_present(write_reorder_chipsets) &&
10261 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10262 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10265 tp->pci_lat_timer < 64) {
10266 tp->pci_lat_timer = 64;
10268 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10269 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10270 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10271 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10273 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10277 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10280 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10281 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10283 /* If this is a 5700 BX chipset, and we are in PCI-X
10284 * mode, enable register write workaround.
10286 * The workaround is to use indirect register accesses
10287 * for all chip writes not to mailbox registers.
10289 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10293 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10295 /* The chip can have it's power management PCI config
10296 * space registers clobbered due to this bug.
10297 * So explicitly force the chip into D0 here.
10299 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10301 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10302 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10303 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10306 /* Also, force SERR#/PERR# in PCI command. */
10307 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10308 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10309 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10313 /* 5700 BX chips need to have their TX producer index mailboxes
10314 * written twice to workaround a bug.
10316 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10317 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10319 /* Back to back register writes can cause problems on this chip,
10320 * the workaround is to read back all reg writes except those to
10321 * mailbox regs. See tg3_write_indirect_reg32().
10323 * PCI Express 5750_A0 rev chips need this workaround too.
10325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10326 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10327 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10328 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10330 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10331 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10332 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10333 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10335 /* Chip-specific fixup from Broadcom driver */
10336 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10337 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10338 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10339 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10342 /* Default fast path register access methods */
10343 tp->read32 = tg3_read32;
10344 tp->write32 = tg3_write32;
10345 tp->read32_mbox = tg3_read32;
10346 tp->write32_mbox = tg3_write32;
10347 tp->write32_tx_mbox = tg3_write32;
10348 tp->write32_rx_mbox = tg3_write32;
10350 /* Various workaround register access methods */
10351 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10352 tp->write32 = tg3_write_indirect_reg32;
10353 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10354 tp->write32 = tg3_write_flush_reg32;
10356 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10357 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10358 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10359 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10360 tp->write32_rx_mbox = tg3_write_flush_reg32;
10363 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10364 tp->read32 = tg3_read_indirect_reg32;
10365 tp->write32 = tg3_write_indirect_reg32;
10366 tp->read32_mbox = tg3_read_indirect_mbox;
10367 tp->write32_mbox = tg3_write_indirect_mbox;
10368 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10369 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10374 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10375 pci_cmd &= ~PCI_COMMAND_MEMORY;
10376 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10379 if (tp->write32 == tg3_write_indirect_reg32 ||
10380 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10381 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10383 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10385 /* Get eeprom hw config before calling tg3_set_power_state().
10386 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10387 * determined before calling tg3_set_power_state() so that
10388 * we know whether or not to switch out of Vaux power.
10389 * When the flag is set, it means that GPIO1 is used for eeprom
10390 * write protect and also implies that it is a LOM where GPIOs
10391 * are not used to switch power.
10393 tg3_get_eeprom_hw_cfg(tp);
10395 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10396 * GPIO1 driven high will bring 5700's external PHY out of reset.
10397 * It is also used as eeprom write protect on LOMs.
10399 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10400 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10401 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10402 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10403 GRC_LCLCTRL_GPIO_OUTPUT1);
10404 /* Unused GPIO3 must be driven as output on 5752 because there
10405 * are no pull-up resistors on unused GPIO pins.
10407 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10408 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10410 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10411 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10413 /* Force the chip into D0. */
10414 err = tg3_set_power_state(tp, PCI_D0);
10416 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10417 pci_name(tp->pdev));
10421 /* 5700 B0 chips do not support checksumming correctly due
10422 * to hardware bugs.
10424 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10425 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10427 /* Derive initial jumbo mode from MTU assigned in
10428 * ether_setup() via the alloc_etherdev() call
10430 if (tp->dev->mtu > ETH_DATA_LEN &&
10431 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10432 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10434 /* Determine WakeOnLan speed to use. */
10435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10436 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10437 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10438 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10439 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10441 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10444 /* A few boards don't want Ethernet@WireSpeed phy feature */
10445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10446 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10447 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10448 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10449 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10450 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10452 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10453 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10454 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10455 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10456 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10458 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10461 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10463 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10466 tp->coalesce_mode = 0;
10467 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10468 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10469 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10471 /* Initialize MAC MI mode, polling disabled. */
10472 tw32_f(MAC_MI_MODE, tp->mi_mode);
10475 /* Initialize data/descriptor byte/word swapping. */
10476 val = tr32(GRC_MODE);
10477 val &= GRC_MODE_HOST_STACKUP;
10478 tw32(GRC_MODE, val | tp->grc_mode);
10480 tg3_switch_clocks(tp);
10482 /* Clear this out for sanity. */
10483 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10485 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10487 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10488 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10489 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10491 if (chiprevid == CHIPREV_ID_5701_A0 ||
10492 chiprevid == CHIPREV_ID_5701_B0 ||
10493 chiprevid == CHIPREV_ID_5701_B2 ||
10494 chiprevid == CHIPREV_ID_5701_B5) {
10495 void __iomem *sram_base;
10497 /* Write some dummy words into the SRAM status block
10498 * area, see if it reads back correctly. If the return
10499 * value is bad, force enable the PCIX workaround.
10501 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10503 writel(0x00000000, sram_base);
10504 writel(0x00000000, sram_base + 4);
10505 writel(0xffffffff, sram_base + 4);
10506 if (readl(sram_base) != 0x00000000)
10507 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10512 tg3_nvram_init(tp);
10514 grc_misc_cfg = tr32(GRC_MISC_CFG);
10515 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10517 /* Broadcom's driver says that CIOBE multisplit has a bug */
10519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10520 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10521 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10522 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10526 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10527 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10528 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10530 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10531 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10532 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10533 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10534 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10535 HOSTCC_MODE_CLRTICK_TXBD);
10537 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10538 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10539 tp->misc_host_ctrl);
10542 /* these are limited to 10/100 only */
10543 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10544 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10545 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10546 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10547 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10548 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10549 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10550 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10551 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10552 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10553 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10555 err = tg3_phy_probe(tp);
10557 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10558 pci_name(tp->pdev), err);
10559 /* ... but do not return immediately ... */
10562 tg3_read_partno(tp);
10563 tg3_read_fw_ver(tp);
10565 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10566 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10569 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10571 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10574 /* 5700 {AX,BX} chips have a broken status block link
10575 * change bit implementation, so we must use the
10576 * status register in those cases.
10578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10579 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10581 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10583 /* The led_ctrl is set during tg3_phy_probe, here we might
10584 * have to force the link status polling mechanism based
10585 * upon subsystem IDs.
10587 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10588 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10589 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10590 TG3_FLAG_USE_LINKCHG_REG);
10593 /* For all SERDES we poll the MAC status register. */
10594 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10595 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10597 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10599 /* All chips before 5787 can get confused if TX buffers
10600 * straddle the 4GB address boundary in some cases.
10602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10604 tp->dev->hard_start_xmit = tg3_start_xmit;
10606 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10610 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10613 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10615 /* Increment the rx prod index on the rx std ring by at most
10616 * 8 for these chips to workaround hw errata.
10618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10621 tp->rx_std_max_post = 8;
10623 /* By default, disable wake-on-lan. User can change this
10624 * using ETHTOOL_SWOL.
10626 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10631 #ifdef CONFIG_SPARC64
/* Fetch this NIC's MAC address from the OpenFirmware device tree on
 * sparc64: the "local-mac-address" property of the device's PROM node.
 * On a 6-byte match the address is installed as both the active and
 * the permanent address.
 * NOTE(review): the guard around the property lookup and the success/
 * failure return statements are elided in this extract -- presumably
 * 0 on success, non-zero when no usable property exists; confirm
 * against the full source.
 */
10632 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10634 struct net_device *dev = tp->dev;
10635 struct pci_dev *pdev = tp->pdev;
/* On sparc64, pci_dev->sysdata carries the PROM node cookie. */
10636 struct pcidev_cookie *pcp = pdev->sysdata;
10639 unsigned char *addr;
/* Ask the firmware for the per-slot MAC property; the property
 * length comes back through an out parameter (declaration elided). */
10642 addr = of_get_property(pcp->prom_node, "local-mac-address",
10644 if (addr && len == 6) {
/* Valid 6-byte MAC: use it for both dev_addr and perm_addr. */
10645 memcpy(dev->dev_addr, addr, 6);
10646 memcpy(dev->perm_addr, dev->dev_addr, 6);
10653 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10655 struct net_device *dev = tp->dev;
10657 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10658 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device MAC address, trying sources in decreasing
 * order of trust: sparc64 firmware property, SRAM MAC-address
 * mailbox, NVRAM, and finally the live MAC address registers.
 * The address is validated with is_valid_ether_addr() at each stage.
 * NOTE(review): several lines (mac_offset initialization, return
 * statements, some closing braces) are elided in this extract.
 */
10663 static int __devinit tg3_get_device_address(struct tg3 *tp)
10665 struct net_device *dev = tp->dev;
10666 u32 hi, lo, mac_offset;
10669 #ifdef CONFIG_SPARC64
/* Firmware-provided address wins outright on sparc64. */
10670 if (!tg3_get_macaddr_sparc(tp))
/* Dual-MAC parts (5704 / 5780-class): the second function reads its
 * address from a different NVRAM offset; resetting NVRAM requires
 * taking the NVRAM lock first. */
10675 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10676 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10677 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10679 if (tg3_nvram_lock(tp))
10680 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10682 tg3_nvram_unlock(tp);
10685 /* First try to get it from MAC address mailbox. */
10686 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is the "HK" signature bootcode leaves in the high word. */
10687 if ((hi >> 16) == 0x484b) {
10688 dev->dev_addr[0] = (hi >> 8) & 0xff;
10689 dev->dev_addr[1] = (hi >> 0) & 0xff;
10691 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10692 dev->dev_addr[2] = (lo >> 24) & 0xff;
10693 dev->dev_addr[3] = (lo >> 16) & 0xff;
10694 dev->dev_addr[4] = (lo >> 8) & 0xff;
10695 dev->dev_addr[5] = (lo >> 0) & 0xff;
10697 /* Some old bootcode may report a 0 MAC address in SRAM */
10698 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10701 /* Next, try NVRAM. */
/* Note NVRAM stores the address with a different byte layout than
 * the SRAM mailbox, hence the different shift pattern. */
10702 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10703 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10704 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10705 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10706 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10707 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10708 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10709 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10711 /* Finally just fetch it out of the MAC control regs. */
10713 hi = tr32(MAC_ADDR_0_HIGH);
10714 lo = tr32(MAC_ADDR_0_LOW);
10716 dev->dev_addr[5] = lo & 0xff;
10717 dev->dev_addr[4] = (lo >> 8) & 0xff;
10718 dev->dev_addr[3] = (lo >> 16) & 0xff;
10719 dev->dev_addr[2] = (lo >> 24) & 0xff;
10720 dev->dev_addr[1] = hi & 0xff;
10721 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Still no valid address: on sparc64 fall back to the IDPROM. */
10725 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10726 #ifdef CONFIG_SPARC64
10727 if (!tg3_get_default_macaddr_sparc(tp))
/* Whatever source won becomes the permanent address too. */
10732 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10736 #define BOUNDARY_SINGLE_CACHELINE 1
10737 #define BOUNDARY_MULTI_CACHELINE 2
/* Fold DMA read/write boundary bits into 'val' (the future
 * TG3PCI_DMA_RW_CTRL value) based on the host cache line size, the
 * bus type (PCI / PCI-X / PCI-E) and the architecture's preference
 * for disconnecting at cache-line boundaries.
 * Returns the updated register value.
 * NOTE(review): this extract elides the zero-cacheline guard, the
 * "goal == 0" early exit, the switch default cases and the final
 * return -- read alongside the full source.
 */
10739 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10741 int cacheline_size;
/* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means "unset", for
 * which a large 1024-byte value is substituted (guard elided). */
10745 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10747 cacheline_size = 1024;
10749 cacheline_size = (int) byte * 4;
10751 /* On 5703 and later chips, the boundary bits have no
10754 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10755 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10756 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Architecture policy: PPC64/IA64/PARISC tolerate multi-cacheline
 * bursts; SPARC64/Alpha want bursts bounded to one cache line. */
10759 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10760 goal = BOUNDARY_MULTI_CACHELINE;
10762 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10763 goal = BOUNDARY_SINGLE_CACHELINE;
10772 /* PCI controllers on most RISC systems tend to disconnect
10773 * when a device tries to burst across a cache-line boundary.
10774 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10776 * Unfortunately, for PCI-E there are only limited
10777 * write-side controls for this, and thus for reads
10778 * we will still get the disconnects. We'll also waste
10779 * these PCI cycles for both read and write for chips
10780 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary encodings differ from plain PCI, pick per
 * cache-line size (switch case labels elided in this extract). */
10783 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10784 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10785 switch (cacheline_size) {
10790 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10791 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10792 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10794 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10795 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10800 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10801 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10805 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10806 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only the write boundary is controllable. */
10809 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10810 switch (cacheline_size) {
10814 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10815 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10816 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10822 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10823 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: symmetric read/write boundary encodings. */
10827 switch (cacheline_size) {
10829 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10830 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10831 DMA_RWCTRL_WRITE_BNDRY_16);
10836 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10837 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10838 DMA_RWCTRL_WRITE_BNDRY_32);
10843 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10844 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10845 DMA_RWCTRL_WRITE_BNDRY_64);
10850 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10851 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10852 DMA_RWCTRL_WRITE_BNDRY_128);
10857 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10858 DMA_RWCTRL_WRITE_BNDRY_256);
10861 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10862 DMA_RWCTRL_WRITE_BNDRY_512);
10866 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10867 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one host<->NIC DMA transfer through the chip's internal DMA
 * engine: build an internal buffer descriptor pointing at 'buf'
 * (bus address 'buf_dma', 'size' bytes, NIC-side SRAM offset
 * 0x2100), write it into the SRAM descriptor pool via the PCI
 * memory window, enqueue it on the read- or write-DMA FTQ
 * ('to_device' selects direction), and poll the completion FIFO.
 * NOTE(review): the to_device branch structure, 'ret' handling and
 * the final return are elided in this extract.
 */
10876 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10878 struct tg3_internal_buffer_desc test_desc;
10879 u32 sram_dma_descs;
10882 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce: clear completion FIFOs, DMA status, buffer manager and
 * flow-through queues before programming the test descriptor. */
10884 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10885 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10886 tw32(RDMAC_STATUS, 0);
10887 tw32(WDMAC_STATUS, 0);
10889 tw32(BUFMGR_MODE, 0);
10890 tw32(FTQ_RESET, 0);
/* Descriptor: host bus address split hi/lo, fixed NIC SRAM mbuf
 * offset 0x2100, transfer length. */
10892 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10893 test_desc.addr_lo = buf_dma & 0xffffffff;
10894 test_desc.nic_mbuf = 0x00002100;
10895 test_desc.len = size;
10898 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10899 * the *second* time the tg3 driver was getting loaded after an
10902 * Broadcom tells me:
10903 * ...the DMA engine is connected to the GRC block and a DMA
10904 * reset may affect the GRC block in some unpredictable way...
10905 * The behavior of resets to individual blocks has not been tested.
10907 * Broadcom noted the GRC reset will also reset all sub-components.
/* Host->NIC path: completion queue 13 / source queue 2, read DMA. */
10910 test_desc.cqid_sqid = (13 << 8) | 2;
10912 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
/* NIC->host path: completion queue 16 / source queue 7, write DMA. */
10915 test_desc.cqid_sqid = (16 << 8) | 7;
10917 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10920 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into the NIC SRAM descriptor
 * pool via the indirect PCI memory window. */
10922 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10925 val = *(((u32 *)&test_desc) + i);
10926 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10927 sram_dma_descs + (i * sizeof(u32)));
10928 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10930 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the DMA by enqueuing the descriptor address on the
 * direction-appropriate high-priority FTQ. */
10933 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10935 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded, 40 iterations) until the completion FIFO echoes
 * the descriptor address back. */
10939 for (i = 0; i < 40; i++) {
10943 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10945 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10946 if ((val & 0xffff) == sram_dma_descs) {
10957 #define TEST_BUFFER_SIZE 0x2000
/* Choose the chip's DMA read/write control settings, then (on
 * 5700/5701 only) run a write+read DMA loopback against NIC SRAM at
 * 0x2100 to detect the write-DMA boundary bug; on corruption, clamp
 * the write boundary to 16 bytes.  Also forces the 16-byte boundary
 * on chipsets known to expose the bug without failing the test
 * (Apple UniNorth PCI15).
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): several control-flow lines (error gotos, loop
 * bodies, 'out' labels, return) are elided in this extract.
 */
10959 static int __devinit tg3_test_dma(struct tg3 *tp)
10961 dma_addr_t buf_dma;
10962 u32 *buf, saved_dma_rwctrl;
/* Coherent test buffer shared with the NIC's DMA engine. */
10965 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Base PCI command watermarks; boundary bits added next. */
10971 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10972 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10974 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Per-bus-type / per-ASIC watermark tuning. */
10976 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10977 /* DMA read watermark not used on PCIE */
10978 tp->dma_rwctrl |= 0x00180000;
10979 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10982 tp->dma_rwctrl |= 0x003f0000;
10984 tp->dma_rwctrl |= 0x003f000f;
10986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10988 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10990 /* If the 5704 is behind the EPB bridge, we can
10991 * do the less restrictive ONE_DMA workaround for
10992 * better performance.
10994 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10996 tp->dma_rwctrl |= 0x8000;
10997 else if (ccval == 0x6 || ccval == 0x7)
10998 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11000 /* Set bit 23 to enable PCIX hw bug fix */
11001 tp->dma_rwctrl |= 0x009f0000;
11002 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11003 /* 5780 always in PCIX mode */
11004 tp->dma_rwctrl |= 0x00144000;
11005 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11006 /* 5714 always in PCIX mode */
11007 tp->dma_rwctrl |= 0x00148000;
11009 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: low nibble is repurposed; clear it. */
11013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11015 tp->dma_rwctrl &= 0xfffffff0;
11017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11019 /* Remove this if it causes problems for some boards. */
11020 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11022 /* On 5700/5701 chips, we need to set this bit.
11023 * Otherwise the chip will issue cacheline transactions
11024 * to streamable DMA memory with not all the byte
11025 * enables turned on. This is an error on several
11026 * RISC PCI controllers, in particular sparc64.
11028 * On 5703/5704 chips, this bit has been reassigned
11029 * a different meaning. In particular, it is used
11030 * on those chips to enable a PCI-X workaround.
11032 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11035 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11038 /* Unneeded, already done by tg3_get_invariants. */
11039 tg3_switch_clocks(tp);
/* Only 5700/5701 need the loopback test below. */
11043 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11044 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11047 /* It is best to perform DMA test with maximum write burst size
11048 * to expose the 5700/5701 write DMA bug.
11050 saved_dma_rwctrl = tp->dma_rwctrl;
11051 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern (body elided). */
11057 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11060 /* Send the buffer to the chip. */
11061 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11063 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11068 /* validate data reached card RAM correctly. */
11069 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* Compare each SRAM word (little-endian on the wire) with the
 * host-side pattern. */
11071 tg3_read_mem(tp, 0x2100 + (i*4), &val);
11072 if (le32_to_cpu(val) != p[i]) {
11073 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
11074 /* ret = -ENODEV here? */
11079 /* Now read it back. */
11080 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11082 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
/* Verify what the NIC wrote back into host memory. */
11088 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* Corruption detected: clamp write boundary to 16 bytes and
 * retry (loop-back path elided in this extract). */
11092 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11093 DMA_RWCTRL_WRITE_BNDRY_16) {
11094 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11095 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11096 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11099 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop completed without mismatch: test passed. */
11105 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11111 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11112 DMA_RWCTRL_WRITE_BNDRY_16) {
11113 static struct pci_device_id dma_wait_state_chipsets[] = {
11114 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11115 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11119 /* DMA test passed without adjusting DMA boundary,
11120 * now look for chipsets that are known to expose the
11121 * DMA bug without failing the test.
11123 if (pci_dev_present(dma_wait_state_chipsets)) {
11124 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11125 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11128 /* Safe to use the calculated DMA boundary. */
11129 tp->dma_rwctrl = saved_dma_rwctrl;
11131 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11135 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11140 static void __devinit tg3_init_link_config(struct tg3 *tp)
11142 tp->link_config.advertising =
11143 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11144 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11145 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11146 ADVERTISED_Autoneg | ADVERTISED_MII);
11147 tp->link_config.speed = SPEED_INVALID;
11148 tp->link_config.duplex = DUPLEX_INVALID;
11149 tp->link_config.autoneg = AUTONEG_ENABLE;
11150 tp->link_config.active_speed = SPEED_INVALID;
11151 tp->link_config.active_duplex = DUPLEX_INVALID;
11152 tp->link_config.phy_is_low_power = 0;
11153 tp->link_config.orig_speed = SPEED_INVALID;
11154 tp->link_config.orig_duplex = DUPLEX_INVALID;
11155 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11158 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11160 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11161 tp->bufmgr_config.mbuf_read_dma_low_water =
11162 DEFAULT_MB_RDMA_LOW_WATER_5705;
11163 tp->bufmgr_config.mbuf_mac_rx_low_water =
11164 DEFAULT_MB_MACRX_LOW_WATER_5705;
11165 tp->bufmgr_config.mbuf_high_water =
11166 DEFAULT_MB_HIGH_WATER_5705;
11168 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11169 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11170 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11171 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11172 tp->bufmgr_config.mbuf_high_water_jumbo =
11173 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11175 tp->bufmgr_config.mbuf_read_dma_low_water =
11176 DEFAULT_MB_RDMA_LOW_WATER;
11177 tp->bufmgr_config.mbuf_mac_rx_low_water =
11178 DEFAULT_MB_MACRX_LOW_WATER;
11179 tp->bufmgr_config.mbuf_high_water =
11180 DEFAULT_MB_HIGH_WATER;
11182 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11183 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11184 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11185 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11186 tp->bufmgr_config.mbuf_high_water_jumbo =
11187 DEFAULT_MB_HIGH_WATER_JUMBO;
11190 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11191 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11194 static char * __devinit tg3_phy_string(struct tg3 *tp)
11196 switch (tp->phy_id & PHY_ID_MASK) {
11197 case PHY_ID_BCM5400: return "5400";
11198 case PHY_ID_BCM5401: return "5401";
11199 case PHY_ID_BCM5411: return "5411";
11200 case PHY_ID_BCM5701: return "5701";
11201 case PHY_ID_BCM5703: return "5703";
11202 case PHY_ID_BCM5704: return "5704";
11203 case PHY_ID_BCM5705: return "5705";
11204 case PHY_ID_BCM5750: return "5750";
11205 case PHY_ID_BCM5752: return "5752";
11206 case PHY_ID_BCM5714: return "5714";
11207 case PHY_ID_BCM5780: return "5780";
11208 case PHY_ID_BCM5755: return "5755";
11209 case PHY_ID_BCM5787: return "5787";
11210 case PHY_ID_BCM8002: return "8002/serdes";
11211 case 0: return "serdes";
11212 default: return "unknown";
/* Format a human-readable bus description into the caller-supplied
 * buffer: "PCI Express", "PCIX:<speed>", or "PCI:<speed>:<width>".
 * PCI-X speed is decoded from the low 5 bits of TG3PCI_CLOCK_CTRL
 * (0=33, 2=50, 4=66, 6=100, 7=133 MHz); the 5704 CIOBE board is
 * always reported as 133MHz.
 * NOTE(review): the "return str;" statements are elided in this
 * extract; the function returns the buffer it filled.
 */
11216 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11218 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11219 strcpy(str, "PCI Express");
11221 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11222 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11224 strcpy(str, "PCIX:");
11226 if ((clock_ctrl == 7) ||
11227 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11228 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11229 strcat(str, "133MHz");
11230 else if (clock_ctrl == 0)
11231 strcat(str, "33MHz");
11232 else if (clock_ctrl == 2)
11233 strcat(str, "50MHz");
11234 else if (clock_ctrl == 4)
11235 strcat(str, "66MHz");
11236 else if (clock_ctrl == 6)
11237 strcat(str, "100MHz");
/* Conventional PCI: speed from the HIGH_SPEED flag, then width. */
11239 strcpy(str, "PCI:");
11240 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11241 strcat(str, "66MHz");
11243 strcat(str, "33MHz");
11245 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11246 strcat(str, ":32-bit");
11248 strcat(str, ":64-bit");
/* Locate the sibling function of a dual-port device (e.g. 5704):
 * scan the other functions of the same slot (devfn with the
 * function bits masked off) for a pci_dev distinct from our own.
 * NOTE(review): the tail is elided in this extract -- per the
 * comments below, a single-port configuration falls back to
 * returning tp->pdev itself, and the refcount taken by
 * pci_get_slot() is dropped before returning; confirm against the
 * full source.
 */
11252 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11254 struct pci_dev *peer;
/* devfn & ~7 clears the 3 function bits, keeping the slot. */
11255 unsigned int func, devnr = tp->pdev->devfn & ~7;
11257 for (func = 0; func < 8; func++) {
11258 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11259 if (peer && peer != tp->pdev)
11263 /* 5704 can be configured in single-port mode, set peer to
11264 * tp->pdev in that case.
11272 * We don't need to keep the refcount elevated; there's no way
11273 * to remove one half of this device without removing the other
11280 static void __devinit tg3_init_coal(struct tg3 *tp)
11282 struct ethtool_coalesce *ec = &tp->coal;
11284 memset(ec, 0, sizeof(*ec));
11285 ec->cmd = ETHTOOL_GCOALESCE;
11286 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11287 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11288 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11289 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11290 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11291 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11292 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11293 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11294 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11296 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11297 HOSTCC_MODE_CLRTICK_TXBD)) {
11298 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11299 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11300 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11301 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11304 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11305 ec->rx_coalesce_usecs_irq = 0;
11306 ec->tx_coalesce_usecs_irq = 0;
11307 ec->stats_block_coalesce_usecs = 0;
11311 static int __devinit tg3_init_one(struct pci_dev *pdev,
11312 const struct pci_device_id *ent)
11314 static int tg3_version_printed = 0;
11315 unsigned long tg3reg_base, tg3reg_len;
11316 struct net_device *dev;
11318 int i, err, pm_cap;
11320 u64 dma_mask, persist_dma_mask;
11322 if (tg3_version_printed++ == 0)
11323 printk(KERN_INFO "%s", version);
11325 err = pci_enable_device(pdev);
11327 printk(KERN_ERR PFX "Cannot enable PCI device, "
11332 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11333 printk(KERN_ERR PFX "Cannot find proper PCI device "
11334 "base address, aborting.\n");
11336 goto err_out_disable_pdev;
11339 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11341 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11343 goto err_out_disable_pdev;
11346 pci_set_master(pdev);
11348 /* Find power-management capability. */
11349 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11351 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11354 goto err_out_free_res;
11357 tg3reg_base = pci_resource_start(pdev, 0);
11358 tg3reg_len = pci_resource_len(pdev, 0);
11360 dev = alloc_etherdev(sizeof(*tp));
11362 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11364 goto err_out_free_res;
11367 SET_MODULE_OWNER(dev);
11368 SET_NETDEV_DEV(dev, &pdev->dev);
11370 #if TG3_VLAN_TAG_USED
11371 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11372 dev->vlan_rx_register = tg3_vlan_rx_register;
11373 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11376 tp = netdev_priv(dev);
11379 tp->pm_cap = pm_cap;
11380 tp->mac_mode = TG3_DEF_MAC_MODE;
11381 tp->rx_mode = TG3_DEF_RX_MODE;
11382 tp->tx_mode = TG3_DEF_TX_MODE;
11383 tp->mi_mode = MAC_MI_MODE_BASE;
11385 tp->msg_enable = tg3_debug;
11387 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11389 /* The word/byte swap controls here control register access byte
11390 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11393 tp->misc_host_ctrl =
11394 MISC_HOST_CTRL_MASK_PCI_INT |
11395 MISC_HOST_CTRL_WORD_SWAP |
11396 MISC_HOST_CTRL_INDIR_ACCESS |
11397 MISC_HOST_CTRL_PCISTATE_RW;
11399 /* The NONFRM (non-frame) byte/word swap controls take effect
11400 * on descriptor entries, anything which isn't packet data.
11402 * The StrongARM chips on the board (one for tx, one for rx)
11403 * are running in big-endian mode.
11405 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11406 GRC_MODE_WSWAP_NONFRM_DATA);
11407 #ifdef __BIG_ENDIAN
11408 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11410 spin_lock_init(&tp->lock);
11411 spin_lock_init(&tp->tx_lock);
11412 spin_lock_init(&tp->indirect_lock);
11413 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11415 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11416 if (tp->regs == 0UL) {
11417 printk(KERN_ERR PFX "Cannot map device registers, "
11420 goto err_out_free_dev;
11423 tg3_init_link_config(tp);
11425 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11426 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11427 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11429 dev->open = tg3_open;
11430 dev->stop = tg3_close;
11431 dev->get_stats = tg3_get_stats;
11432 dev->set_multicast_list = tg3_set_rx_mode;
11433 dev->set_mac_address = tg3_set_mac_addr;
11434 dev->do_ioctl = tg3_ioctl;
11435 dev->tx_timeout = tg3_tx_timeout;
11436 dev->poll = tg3_poll;
11437 dev->ethtool_ops = &tg3_ethtool_ops;
11439 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11440 dev->change_mtu = tg3_change_mtu;
11441 dev->irq = pdev->irq;
11442 #ifdef CONFIG_NET_POLL_CONTROLLER
11443 dev->poll_controller = tg3_poll_controller;
11446 err = tg3_get_invariants(tp);
11448 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11450 goto err_out_iounmap;
11453 /* The EPB bridge inside 5714, 5715, and 5780 and any
11454 * device behind the EPB cannot support DMA addresses > 40-bit.
11455 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11456 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11457 * do DMA address check in tg3_start_xmit().
11459 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11460 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11461 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11462 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11463 #ifdef CONFIG_HIGHMEM
11464 dma_mask = DMA_64BIT_MASK;
11467 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11469 /* Configure DMA attributes. */
11470 if (dma_mask > DMA_32BIT_MASK) {
11471 err = pci_set_dma_mask(pdev, dma_mask);
11473 dev->features |= NETIF_F_HIGHDMA;
11474 err = pci_set_consistent_dma_mask(pdev,
11477 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11478 "DMA for consistent allocations\n");
11479 goto err_out_iounmap;
11483 if (err || dma_mask == DMA_32BIT_MASK) {
11484 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11486 printk(KERN_ERR PFX "No usable DMA configuration, "
11488 goto err_out_iounmap;
11492 tg3_init_bufmgr_config(tp);
11494 #if TG3_TSO_SUPPORT != 0
11495 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11496 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11498 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11499 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11500 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11501 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11502 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11504 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11507 /* TSO is on by default on chips that support hardware TSO.
11508 * Firmware TSO on older chips gives lower performance, so it
11509 * is off by default, but can be enabled using ethtool.
11511 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11512 dev->features |= NETIF_F_TSO;
11516 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11517 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11518 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11519 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11520 tp->rx_pending = 63;
11523 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11524 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11525 tp->pdev_peer = tg3_find_peer(tp);
11527 err = tg3_get_device_address(tp);
11529 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11531 goto err_out_iounmap;
11535 * Reset chip in case UNDI or EFI driver did not shutdown
11536 * DMA self test will enable WDMAC and we'll see (spurious)
11537 * pending DMA on the PCI bus at that point.
11539 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11540 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11541 pci_save_state(tp->pdev);
11542 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11543 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11546 err = tg3_test_dma(tp);
11548 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11549 goto err_out_iounmap;
11552 /* Tigon3 can do ipv4 only... and some chips have buggy
11555 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11558 dev->features |= NETIF_F_HW_CSUM;
11560 dev->features |= NETIF_F_IP_CSUM;
11561 dev->features |= NETIF_F_SG;
11562 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11564 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11566 /* flow control autonegotiation is default behavior */
11567 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11571 /* Now that we have fully setup the chip, save away a snapshot
11572 * of the PCI config space. We need to restore this after
11573 * GRC_MISC_CFG core clock resets and some resume events.
11575 pci_save_state(tp->pdev);
11577 err = register_netdev(dev);
11579 printk(KERN_ERR PFX "Cannot register net device, "
11581 goto err_out_iounmap;
11584 pci_set_drvdata(pdev, dev);
11586 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11588 tp->board_part_number,
11589 tp->pci_chip_rev_id,
11590 tg3_phy_string(tp),
11591 tg3_bus_string(tp, str),
11592 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11594 for (i = 0; i < 6; i++)
11595 printk("%2.2x%c", dev->dev_addr[i],
11596 i == 5 ? '\n' : ':');
11598 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11599 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11602 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11603 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11604 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11605 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11606 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11607 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11608 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11609 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11610 dev->name, tp->dma_rwctrl,
11611 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11612 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11614 netif_carrier_off(tp->dev);
11628 pci_release_regions(pdev);
11630 err_out_disable_pdev:
11631 pci_disable_device(pdev);
11632 pci_set_drvdata(pdev, NULL);
/*
 * tg3_remove_one - PCI hot-unplug / driver-detach callback.
 * Unregisters the net device and releases the PCI resources that
 * tg3_init_one() claimed.
 * NOTE(review): this listing is sampled; intervening lines (the
 * "if (dev)" guard, iounmap/free_netdev cleanup between 11644 and
 * 11650, closing braces) are not visible here — confirm against the
 * full source.
 */
11636 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11638 struct net_device *dev = pci_get_drvdata(pdev);
11641 struct tg3 *tp = netdev_priv(dev);
/* Cancel any pending reset_task work before the device goes away. */
11643 flush_scheduled_work();
11644 unregister_netdev(dev);
11650 pci_release_regions(pdev);
11651 pci_disable_device(pdev);
/* Clear drvdata so later PM callbacks do not see a stale pointer. */
11652 pci_set_drvdata(pdev, NULL);
/*
 * tg3_suspend - PCI power-management suspend hook.
 * Quiesces the interface (stop queues, kill timer, mask interrupts),
 * halts the chip, then programs the low-power state chosen by the PM
 * core.  If the power transition fails, the hardware is re-initialized
 * and the interface restarted so the system keeps a working NIC.
 * NOTE(review): sampled listing — the early "return 0" for a stopped
 * interface, the "if (err)" guard around the recovery path, and the
 * final return are not visible here; confirm against the full source.
 */
11656 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11658 struct net_device *dev = pci_get_drvdata(pdev);
11659 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never brought up. */
11662 if (!netif_running(dev))
/* Flush any queued reset_task before touching the hardware. */
11665 flush_scheduled_work();
11666 tg3_netif_stop(tp);
11668 del_timer_sync(&tp->timer);
/* Second arg 1: also synchronize with the irq handler while locked. */
11670 tg3_full_lock(tp, 1);
11671 tg3_disable_ints(tp);
11672 tg3_full_unlock(tp);
/* Tell the stack the device is unavailable during suspend. */
11674 netif_device_detach(dev);
11676 tg3_full_lock(tp, 0);
11677 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Chip is halted: drop INIT_COMPLETE so resume knows to re-init. */
11678 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11679 tg3_full_unlock(tp);
/* Enter the target low-power state selected by the PM core. */
11681 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Recovery path — presumably guarded by "if (err)" in the elided
 * lines: bring the hardware back up and restart the interface. */
11683 tg3_full_lock(tp, 0);
11685 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11686 tg3_init_hw(tp, 1);
11688 tp->timer.expires = jiffies + tp->timer_offset;
11689 add_timer(&tp->timer);
11691 netif_device_attach(dev);
11692 tg3_netif_start(tp);
11694 tg3_full_unlock(tp);
/*
 * tg3_resume - PCI power-management resume hook.
 * Restores saved PCI config space, returns the chip to full power
 * (PCI_D0), re-initializes the hardware and restarts the interface
 * and its periodic timer.
 * NOTE(review): sampled listing — the early return for a stopped
 * interface, the error check after tg3_set_power_state(), and the
 * final return are not visible here; confirm against the full source.
 */
11700 static int tg3_resume(struct pci_dev *pdev)
11702 struct net_device *dev = pci_get_drvdata(pdev);
11703 struct tg3 *tp = netdev_priv(dev);
/* An interface that was down at suspend needs no hardware re-init. */
11706 if (!netif_running(dev))
/* Undo the config-space loss from the low-power transition. */
11709 pci_restore_state(tp->pdev);
11711 err = tg3_set_power_state(tp, PCI_D0);
11715 netif_device_attach(dev);
11717 tg3_full_lock(tp, 0);
/* Re-init and mark the chip fully programmed again. */
11719 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11720 tg3_init_hw(tp, 1);
/* Re-arm the periodic driver timer stopped in tg3_suspend(). */
11722 tp->timer.expires = jiffies + tp->timer_offset;
11723 add_timer(&tp->timer);
11725 tg3_netif_start(tp);
11727 tg3_full_unlock(tp);
/*
 * PCI driver glue: ties the probe/remove and power-management entry
 * points to the device IDs in tg3_pci_tbl.
 * NOTE(review): the closing "};" of this initializer is in an elided
 * line of this sampled listing.
 */
11732 static struct pci_driver tg3_driver = {
11733 .name = DRV_MODULE_NAME,
11734 .id_table = tg3_pci_tbl,
11735 .probe = tg3_init_one,
/* __devexit_p() compiles to NULL when hotplug support is absent. */
11736 .remove = __devexit_p(tg3_remove_one),
11737 .suspend = tg3_suspend,
11738 .resume = tg3_resume
/* Module entry point: register the tg3 PCI driver with the PCI core. */
11741 static int __init tg3_init(void)
11743 return pci_module_init(&tg3_driver);
/* Module exit point: unregister the driver, detaching all devices. */
11746 static void __exit tg3_cleanup(void)
11748 pci_unregister_driver(&tg3_driver);
/* Bind the init/exit functions to module load and unload. */
11751 module_init(tg3_init);
11752 module_exit(tg3_cleanup);