1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.23.1"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 struct mac_info *mac_control;
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
144 {"tmac_data_octets"},
148 {"tmac_pause_ctrl_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
221 {"new_rd_req_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
226 {"new_wr_req_rtry_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"),
285 ("watchdog_timer_cnt"),
292 ("tx_tcode_buf_abort_cnt"),
293 ("tx_tcode_desc_abort_cnt"),
294 ("tx_tcode_parity_err_cnt"),
295 ("tx_tcode_link_loss_cnt"),
296 ("tx_tcode_list_proc_err_cnt"),
297 ("rx_tcode_parity_err_cnt"),
298 ("rx_tcode_abort_cnt"),
299 ("rx_tcode_parity_abort_cnt"),
300 ("rx_tcode_rda_fail_cnt"),
301 ("rx_tcode_unkn_prot_cnt"),
302 ("rx_tcode_fcs_err_cnt"),
303 ("rx_tcode_buf_size_err_cnt"),
304 ("rx_tcode_rxd_corrupt_cnt"),
305 ("rx_tcode_unkn_err_cnt")
308 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
309 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
311 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
313 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
314 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
316 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
317 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
319 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
320 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
322 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
323 init_timer(&timer); \
324 timer.function = handle; \
325 timer.data = (unsigned long) arg; \
326 mod_timer(&timer, (jiffies + exp)) \
329 static void s2io_vlan_rx_register(struct net_device *dev,
330 struct vlan_group *grp)
332 struct s2io_nic *nic = dev->priv;
335 spin_lock_irqsave(&nic->tx_lock, flags);
337 spin_unlock_irqrestore(&nic->tx_lock, flags);
340 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
341 static int vlan_strip_flag;
344 * Constants to be programmed into the Xena's registers, to configure
349 static const u64 herc_act_dtx_cfg[] = {
351 0x8000051536750000ULL, 0x80000515367500E0ULL,
353 0x8000051536750004ULL, 0x80000515367500E4ULL,
355 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
357 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
359 0x801205150D440000ULL, 0x801205150D4400E0ULL,
361 0x801205150D440004ULL, 0x801205150D4400E4ULL,
363 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
365 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
370 static const u64 xena_dtx_cfg[] = {
372 0x8000051500000000ULL, 0x80000515000000E0ULL,
374 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
376 0x8001051500000000ULL, 0x80010515000000E0ULL,
378 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
380 0x8002051500000000ULL, 0x80020515000000E0ULL,
382 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
387 * Constants for Fixing the MacAddress problem seen mostly on
390 static const u64 fix_mac[] = {
391 0x0060000000000000ULL, 0x0060600000000000ULL,
392 0x0040600000000000ULL, 0x0000600000000000ULL,
393 0x0020600000000000ULL, 0x0060600000000000ULL,
394 0x0020600000000000ULL, 0x0060600000000000ULL,
395 0x0020600000000000ULL, 0x0060600000000000ULL,
396 0x0020600000000000ULL, 0x0060600000000000ULL,
397 0x0020600000000000ULL, 0x0060600000000000ULL,
398 0x0020600000000000ULL, 0x0060600000000000ULL,
399 0x0020600000000000ULL, 0x0060600000000000ULL,
400 0x0020600000000000ULL, 0x0060600000000000ULL,
401 0x0020600000000000ULL, 0x0060600000000000ULL,
402 0x0020600000000000ULL, 0x0060600000000000ULL,
403 0x0020600000000000ULL, 0x0000600000000000ULL,
404 0x0040600000000000ULL, 0x0060600000000000ULL,
408 MODULE_LICENSE("GPL");
409 MODULE_VERSION(DRV_VERSION);
412 /* Module Loadable parameters. */
413 S2IO_PARM_INT(tx_fifo_num, 1);
414 S2IO_PARM_INT(rx_ring_num, 1);
417 S2IO_PARM_INT(rx_ring_mode, 1);
418 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
419 S2IO_PARM_INT(rmac_pause_time, 0x100);
420 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
421 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
422 S2IO_PARM_INT(shared_splits, 0);
423 S2IO_PARM_INT(tmac_util_period, 5);
424 S2IO_PARM_INT(rmac_util_period, 5);
425 S2IO_PARM_INT(bimodal, 0);
426 S2IO_PARM_INT(l3l4hdr_size, 128);
427 /* Frequency of Rx desc syncs expressed as power of 2 */
428 S2IO_PARM_INT(rxsync_frequency, 3);
429 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
430 S2IO_PARM_INT(intr_type, 0);
431 /* Large receive offload feature */
432 S2IO_PARM_INT(lro, 0);
433 /* Max pkts to be aggregated by LRO at one time. If not specified,
434 * aggregation happens until we hit max IP pkt size(64K)
436 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
437 S2IO_PARM_INT(indicate_max_pkts, 0);
439 S2IO_PARM_INT(napi, 1);
440 S2IO_PARM_INT(ufo, 0);
441 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
443 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
444 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
445 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
446 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
447 static unsigned int rts_frm_len[MAX_RX_RINGS] =
448 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
450 module_param_array(tx_fifo_len, uint, NULL, 0);
451 module_param_array(rx_ring_sz, uint, NULL, 0);
452 module_param_array(rts_frm_len, uint, NULL, 0);
456 * This table lists all the devices that this driver supports.
458 static struct pci_device_id s2io_tbl[] __devinitdata = {
459 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
460 PCI_ANY_ID, PCI_ANY_ID},
461 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
462 PCI_ANY_ID, PCI_ANY_ID},
463 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
464 PCI_ANY_ID, PCI_ANY_ID},
465 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
466 PCI_ANY_ID, PCI_ANY_ID},
470 MODULE_DEVICE_TABLE(pci, s2io_tbl);
472 static struct pci_error_handlers s2io_err_handler = {
473 .error_detected = s2io_io_error_detected,
474 .slot_reset = s2io_io_slot_reset,
475 .resume = s2io_io_resume,
478 static struct pci_driver s2io_driver = {
480 .id_table = s2io_tbl,
481 .probe = s2io_init_nic,
482 .remove = __devexit_p(s2io_rem_nic),
483 .err_handler = &s2io_err_handler,
486 /* A simplifier macro used both by init and free shared_mem Fns(). */
487 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
490 * init_shared_mem - Allocation and Initialization of Memory
491 * @nic: Device private variable.
492 * Description: The function allocates all the memory areas shared
493 * between the NIC and the driver. This includes Tx descriptors,
494 * Rx descriptors and the statistics block.
497 static int init_shared_mem(struct s2io_nic *nic)
500 void *tmp_v_addr, *tmp_v_addr_next;
501 dma_addr_t tmp_p_addr, tmp_p_addr_next;
502 struct RxD_block *pre_rxd_blk = NULL;
504 int lst_size, lst_per_page;
505 struct net_device *dev = nic->dev;
509 struct mac_info *mac_control;
510 struct config_param *config;
511 unsigned long long mem_allocated = 0;
513 mac_control = &nic->mac_control;
514 config = &nic->config;
517 /* Allocation and initialization of TXDLs in FIOFs */
519 for (i = 0; i < config->tx_fifo_num; i++) {
520 size += config->tx_cfg[i].fifo_len;
522 if (size > MAX_AVAILABLE_TXDS) {
523 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
524 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
528 lst_size = (sizeof(struct TxD) * config->max_txds);
529 lst_per_page = PAGE_SIZE / lst_size;
531 for (i = 0; i < config->tx_fifo_num; i++) {
532 int fifo_len = config->tx_cfg[i].fifo_len;
533 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
534 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
536 if (!mac_control->fifos[i].list_info) {
538 "Malloc failed for list_info\n");
541 mem_allocated += list_holder_size;
542 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
544 for (i = 0; i < config->tx_fifo_num; i++) {
545 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
547 mac_control->fifos[i].tx_curr_put_info.offset = 0;
548 mac_control->fifos[i].tx_curr_put_info.fifo_len =
549 config->tx_cfg[i].fifo_len - 1;
550 mac_control->fifos[i].tx_curr_get_info.offset = 0;
551 mac_control->fifos[i].tx_curr_get_info.fifo_len =
552 config->tx_cfg[i].fifo_len - 1;
553 mac_control->fifos[i].fifo_no = i;
554 mac_control->fifos[i].nic = nic;
555 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
557 for (j = 0; j < page_num; j++) {
561 tmp_v = pci_alloc_consistent(nic->pdev,
565 "pci_alloc_consistent ");
566 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
569 /* If we got a zero DMA address(can happen on
570 * certain platforms like PPC), reallocate.
571 * Store virtual address of page we don't want,
575 mac_control->zerodma_virt_addr = tmp_v;
577 "%s: Zero DMA address for TxDL. ", dev->name);
579 "Virtual address %p\n", tmp_v);
580 tmp_v = pci_alloc_consistent(nic->pdev,
584 "pci_alloc_consistent ");
585 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
588 mem_allocated += PAGE_SIZE;
590 while (k < lst_per_page) {
591 int l = (j * lst_per_page) + k;
592 if (l == config->tx_cfg[i].fifo_len)
594 mac_control->fifos[i].list_info[l].list_virt_addr =
595 tmp_v + (k * lst_size);
596 mac_control->fifos[i].list_info[l].list_phy_addr =
597 tmp_p + (k * lst_size);
603 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
604 if (!nic->ufo_in_band_v)
606 mem_allocated += (size * sizeof(u64));
608 /* Allocation and initialization of RXDs in Rings */
610 for (i = 0; i < config->rx_ring_num; i++) {
611 if (config->rx_cfg[i].num_rxd %
612 (rxd_count[nic->rxd_mode] + 1)) {
613 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
614 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
616 DBG_PRINT(ERR_DBG, "RxDs per Block");
619 size += config->rx_cfg[i].num_rxd;
620 mac_control->rings[i].block_count =
621 config->rx_cfg[i].num_rxd /
622 (rxd_count[nic->rxd_mode] + 1 );
623 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
624 mac_control->rings[i].block_count;
626 if (nic->rxd_mode == RXD_MODE_1)
627 size = (size * (sizeof(struct RxD1)));
629 size = (size * (sizeof(struct RxD3)));
631 for (i = 0; i < config->rx_ring_num; i++) {
632 mac_control->rings[i].rx_curr_get_info.block_index = 0;
633 mac_control->rings[i].rx_curr_get_info.offset = 0;
634 mac_control->rings[i].rx_curr_get_info.ring_len =
635 config->rx_cfg[i].num_rxd - 1;
636 mac_control->rings[i].rx_curr_put_info.block_index = 0;
637 mac_control->rings[i].rx_curr_put_info.offset = 0;
638 mac_control->rings[i].rx_curr_put_info.ring_len =
639 config->rx_cfg[i].num_rxd - 1;
640 mac_control->rings[i].nic = nic;
641 mac_control->rings[i].ring_no = i;
643 blk_cnt = config->rx_cfg[i].num_rxd /
644 (rxd_count[nic->rxd_mode] + 1);
645 /* Allocating all the Rx blocks */
646 for (j = 0; j < blk_cnt; j++) {
647 struct rx_block_info *rx_blocks;
650 rx_blocks = &mac_control->rings[i].rx_blocks[j];
651 size = SIZE_OF_BLOCK; //size is always page size
652 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
654 if (tmp_v_addr == NULL) {
656 * In case of failure, free_shared_mem()
657 * is called, which should free any
658 * memory that was alloced till the
661 rx_blocks->block_virt_addr = tmp_v_addr;
664 mem_allocated += size;
665 memset(tmp_v_addr, 0, size);
666 rx_blocks->block_virt_addr = tmp_v_addr;
667 rx_blocks->block_dma_addr = tmp_p_addr;
668 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
669 rxd_count[nic->rxd_mode],
671 if (!rx_blocks->rxds)
674 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
675 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
676 rx_blocks->rxds[l].virt_addr =
677 rx_blocks->block_virt_addr +
678 (rxd_size[nic->rxd_mode] * l);
679 rx_blocks->rxds[l].dma_addr =
680 rx_blocks->block_dma_addr +
681 (rxd_size[nic->rxd_mode] * l);
684 /* Interlinking all Rx Blocks */
685 for (j = 0; j < blk_cnt; j++) {
687 mac_control->rings[i].rx_blocks[j].block_virt_addr;
689 mac_control->rings[i].rx_blocks[(j + 1) %
690 blk_cnt].block_virt_addr;
692 mac_control->rings[i].rx_blocks[j].block_dma_addr;
694 mac_control->rings[i].rx_blocks[(j + 1) %
695 blk_cnt].block_dma_addr;
697 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
698 pre_rxd_blk->reserved_2_pNext_RxD_block =
699 (unsigned long) tmp_v_addr_next;
700 pre_rxd_blk->pNext_RxD_Blk_physical =
701 (u64) tmp_p_addr_next;
704 if (nic->rxd_mode >= RXD_MODE_3A) {
706 * Allocation of Storages for buffer addresses in 2BUFF mode
707 * and the buffers as well.
709 for (i = 0; i < config->rx_ring_num; i++) {
710 blk_cnt = config->rx_cfg[i].num_rxd /
711 (rxd_count[nic->rxd_mode]+ 1);
712 mac_control->rings[i].ba =
713 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
715 if (!mac_control->rings[i].ba)
717 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
718 for (j = 0; j < blk_cnt; j++) {
720 mac_control->rings[i].ba[j] =
721 kmalloc((sizeof(struct buffAdd) *
722 (rxd_count[nic->rxd_mode] + 1)),
724 if (!mac_control->rings[i].ba[j])
726 mem_allocated += (sizeof(struct buffAdd) * \
727 (rxd_count[nic->rxd_mode] + 1));
728 while (k != rxd_count[nic->rxd_mode]) {
729 ba = &mac_control->rings[i].ba[j][k];
731 ba->ba_0_org = (void *) kmalloc
732 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
736 (BUF0_LEN + ALIGN_SIZE);
737 tmp = (unsigned long)ba->ba_0_org;
739 tmp &= ~((unsigned long) ALIGN_SIZE);
740 ba->ba_0 = (void *) tmp;
742 ba->ba_1_org = (void *) kmalloc
743 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
747 += (BUF1_LEN + ALIGN_SIZE);
748 tmp = (unsigned long) ba->ba_1_org;
750 tmp &= ~((unsigned long) ALIGN_SIZE);
751 ba->ba_1 = (void *) tmp;
758 /* Allocation and initialization of Statistics block */
759 size = sizeof(struct stat_block);
760 mac_control->stats_mem = pci_alloc_consistent
761 (nic->pdev, size, &mac_control->stats_mem_phy);
763 if (!mac_control->stats_mem) {
765 * In case of failure, free_shared_mem() is called, which
766 * should free any memory that was alloced till the
771 mem_allocated += size;
772 mac_control->stats_mem_sz = size;
774 tmp_v_addr = mac_control->stats_mem;
775 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
776 memset(tmp_v_addr, 0, size);
777 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
778 (unsigned long long) tmp_p_addr);
779 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
784 * free_shared_mem - Free the allocated Memory
785 * @nic: Device private variable.
786 * Description: This function is to free all memory locations allocated by
787 * the init_shared_mem() function and return it to the kernel.
790 static void free_shared_mem(struct s2io_nic *nic)
792 int i, j, blk_cnt, size;
795 dma_addr_t tmp_p_addr;
796 struct mac_info *mac_control;
797 struct config_param *config;
798 int lst_size, lst_per_page;
799 struct net_device *dev = nic->dev;
805 mac_control = &nic->mac_control;
806 config = &nic->config;
808 lst_size = (sizeof(struct TxD) * config->max_txds);
809 lst_per_page = PAGE_SIZE / lst_size;
811 for (i = 0; i < config->tx_fifo_num; i++) {
812 ufo_size += config->tx_cfg[i].fifo_len;
813 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
815 for (j = 0; j < page_num; j++) {
816 int mem_blks = (j * lst_per_page);
817 if (!mac_control->fifos[i].list_info)
819 if (!mac_control->fifos[i].list_info[mem_blks].
822 pci_free_consistent(nic->pdev, PAGE_SIZE,
823 mac_control->fifos[i].
826 mac_control->fifos[i].
829 nic->mac_control.stats_info->sw_stat.mem_freed
832 /* If we got a zero DMA address during allocation,
835 if (mac_control->zerodma_virt_addr) {
836 pci_free_consistent(nic->pdev, PAGE_SIZE,
837 mac_control->zerodma_virt_addr,
840 "%s: Freeing TxDL with zero DMA addr. ",
842 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
843 mac_control->zerodma_virt_addr);
844 nic->mac_control.stats_info->sw_stat.mem_freed
847 kfree(mac_control->fifos[i].list_info);
848 nic->mac_control.stats_info->sw_stat.mem_freed +=
849 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
852 size = SIZE_OF_BLOCK;
853 for (i = 0; i < config->rx_ring_num; i++) {
854 blk_cnt = mac_control->rings[i].block_count;
855 for (j = 0; j < blk_cnt; j++) {
856 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
858 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
860 if (tmp_v_addr == NULL)
862 pci_free_consistent(nic->pdev, size,
863 tmp_v_addr, tmp_p_addr);
864 nic->mac_control.stats_info->sw_stat.mem_freed += size;
865 kfree(mac_control->rings[i].rx_blocks[j].rxds);
866 nic->mac_control.stats_info->sw_stat.mem_freed +=
867 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
871 if (nic->rxd_mode >= RXD_MODE_3A) {
872 /* Freeing buffer storage addresses in 2BUFF mode. */
873 for (i = 0; i < config->rx_ring_num; i++) {
874 blk_cnt = config->rx_cfg[i].num_rxd /
875 (rxd_count[nic->rxd_mode] + 1);
876 for (j = 0; j < blk_cnt; j++) {
878 if (!mac_control->rings[i].ba[j])
880 while (k != rxd_count[nic->rxd_mode]) {
882 &mac_control->rings[i].ba[j][k];
884 nic->mac_control.stats_info->sw_stat.\
885 mem_freed += (BUF0_LEN + ALIGN_SIZE);
887 nic->mac_control.stats_info->sw_stat.\
888 mem_freed += (BUF1_LEN + ALIGN_SIZE);
891 kfree(mac_control->rings[i].ba[j]);
892 nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) *
893 (rxd_count[nic->rxd_mode] + 1));
895 kfree(mac_control->rings[i].ba);
896 nic->mac_control.stats_info->sw_stat.mem_freed +=
897 (sizeof(struct buffAdd *) * blk_cnt);
901 if (mac_control->stats_mem) {
902 pci_free_consistent(nic->pdev,
903 mac_control->stats_mem_sz,
904 mac_control->stats_mem,
905 mac_control->stats_mem_phy);
906 nic->mac_control.stats_info->sw_stat.mem_freed +=
907 mac_control->stats_mem_sz;
909 if (nic->ufo_in_band_v) {
910 kfree(nic->ufo_in_band_v);
911 nic->mac_control.stats_info->sw_stat.mem_freed
912 += (ufo_size * sizeof(u64));
917 * s2io_verify_pci_mode -
920 static int s2io_verify_pci_mode(struct s2io_nic *nic)
922 struct XENA_dev_config __iomem *bar0 = nic->bar0;
923 register u64 val64 = 0;
926 val64 = readq(&bar0->pci_mode);
927 mode = (u8)GET_PCI_MODE(val64);
929 if ( val64 & PCI_MODE_UNKNOWN_MODE)
930 return -1; /* Unknown PCI mode */
934 #define NEC_VENID 0x1033
935 #define NEC_DEVID 0x0125
936 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
938 struct pci_dev *tdev = NULL;
939 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
940 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
941 if (tdev->bus == s2io_pdev->bus->parent)
949 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
951 * s2io_print_pci_mode -
953 static int s2io_print_pci_mode(struct s2io_nic *nic)
955 struct XENA_dev_config __iomem *bar0 = nic->bar0;
956 register u64 val64 = 0;
958 struct config_param *config = &nic->config;
960 val64 = readq(&bar0->pci_mode);
961 mode = (u8)GET_PCI_MODE(val64);
963 if ( val64 & PCI_MODE_UNKNOWN_MODE)
964 return -1; /* Unknown PCI mode */
966 config->bus_speed = bus_speed[mode];
968 if (s2io_on_nec_bridge(nic->pdev)) {
969 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
974 if (val64 & PCI_MODE_32_BITS) {
975 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
977 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
981 case PCI_MODE_PCI_33:
982 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
984 case PCI_MODE_PCI_66:
985 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
987 case PCI_MODE_PCIX_M1_66:
988 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
990 case PCI_MODE_PCIX_M1_100:
991 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
993 case PCI_MODE_PCIX_M1_133:
994 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
996 case PCI_MODE_PCIX_M2_66:
997 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
999 case PCI_MODE_PCIX_M2_100:
1000 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1002 case PCI_MODE_PCIX_M2_133:
1003 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1006 return -1; /* Unsupported bus speed */
1013 * init_nic - Initialization of hardware
1014 * @nic: device peivate variable
1015 * Description: The function sequentially configures every block
1016 * of the H/W from their reset values.
1017 * Return Value: SUCCESS on success and
1018 * '-1' on failure (endian settings incorrect).
1021 static int init_nic(struct s2io_nic *nic)
1023 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1024 struct net_device *dev = nic->dev;
1025 register u64 val64 = 0;
1029 struct mac_info *mac_control;
1030 struct config_param *config;
1032 unsigned long long mem_share;
1035 mac_control = &nic->mac_control;
1036 config = &nic->config;
1038 /* to set the swapper controle on the card */
1039 if(s2io_set_swapper(nic)) {
1040 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1045 * Herc requires EOI to be removed from reset before XGXS, so..
1047 if (nic->device_type & XFRAME_II_DEVICE) {
1048 val64 = 0xA500000000ULL;
1049 writeq(val64, &bar0->sw_reset);
1051 val64 = readq(&bar0->sw_reset);
1054 /* Remove XGXS from reset state */
1056 writeq(val64, &bar0->sw_reset);
1058 val64 = readq(&bar0->sw_reset);
1060 /* Enable Receiving broadcasts */
1061 add = &bar0->mac_cfg;
1062 val64 = readq(&bar0->mac_cfg);
1063 val64 |= MAC_RMAC_BCAST_ENABLE;
1064 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1065 writel((u32) val64, add);
1066 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1067 writel((u32) (val64 >> 32), (add + 4));
1069 /* Read registers in all blocks */
1070 val64 = readq(&bar0->mac_int_mask);
1071 val64 = readq(&bar0->mc_int_mask);
1072 val64 = readq(&bar0->xgxs_int_mask);
1076 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1078 if (nic->device_type & XFRAME_II_DEVICE) {
1079 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1080 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1081 &bar0->dtx_control, UF);
1083 msleep(1); /* Necessary!! */
1087 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1088 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1089 &bar0->dtx_control, UF);
1090 val64 = readq(&bar0->dtx_control);
1095 /* Tx DMA Initialization */
1097 writeq(val64, &bar0->tx_fifo_partition_0);
1098 writeq(val64, &bar0->tx_fifo_partition_1);
1099 writeq(val64, &bar0->tx_fifo_partition_2);
1100 writeq(val64, &bar0->tx_fifo_partition_3);
1103 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1105 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1106 13) | vBIT(config->tx_cfg[i].fifo_priority,
1109 if (i == (config->tx_fifo_num - 1)) {
1116 writeq(val64, &bar0->tx_fifo_partition_0);
1120 writeq(val64, &bar0->tx_fifo_partition_1);
1124 writeq(val64, &bar0->tx_fifo_partition_2);
1128 writeq(val64, &bar0->tx_fifo_partition_3);
1134 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1135 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1137 if ((nic->device_type == XFRAME_I_DEVICE) &&
1138 (nic->pdev->revision < 4))
1139 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1141 val64 = readq(&bar0->tx_fifo_partition_0);
1142 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1143 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1146 * Initialization of Tx_PA_CONFIG register to ignore packet
1147 * integrity checking.
1149 val64 = readq(&bar0->tx_pa_cfg);
1150 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1151 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1152 writeq(val64, &bar0->tx_pa_cfg);
1154 /* Rx DMA intialization. */
1156 for (i = 0; i < config->rx_ring_num; i++) {
1158 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1161 writeq(val64, &bar0->rx_queue_priority);
1164 * Allocating equal share of memory to all the
1168 if (nic->device_type & XFRAME_II_DEVICE)
1173 for (i = 0; i < config->rx_ring_num; i++) {
1176 mem_share = (mem_size / config->rx_ring_num +
1177 mem_size % config->rx_ring_num);
1178 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1181 mem_share = (mem_size / config->rx_ring_num);
1182 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1185 mem_share = (mem_size / config->rx_ring_num);
1186 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1189 mem_share = (mem_size / config->rx_ring_num);
1190 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1193 mem_share = (mem_size / config->rx_ring_num);
1194 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1197 mem_share = (mem_size / config->rx_ring_num);
1198 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1201 mem_share = (mem_size / config->rx_ring_num);
1202 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1205 mem_share = (mem_size / config->rx_ring_num);
1206 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1210 writeq(val64, &bar0->rx_queue_cfg);
1213 * Filling Tx round robin registers
1214 * as per the number of FIFOs
1216 switch (config->tx_fifo_num) {
1218 val64 = 0x0000000000000000ULL;
1219 writeq(val64, &bar0->tx_w_round_robin_0);
1220 writeq(val64, &bar0->tx_w_round_robin_1);
1221 writeq(val64, &bar0->tx_w_round_robin_2);
1222 writeq(val64, &bar0->tx_w_round_robin_3);
1223 writeq(val64, &bar0->tx_w_round_robin_4);
1226 val64 = 0x0000010000010000ULL;
1227 writeq(val64, &bar0->tx_w_round_robin_0);
1228 val64 = 0x0100000100000100ULL;
1229 writeq(val64, &bar0->tx_w_round_robin_1);
1230 val64 = 0x0001000001000001ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_2);
1232 val64 = 0x0000010000010000ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_3);
1234 val64 = 0x0100000000000000ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_4);
1238 val64 = 0x0001000102000001ULL;
1239 writeq(val64, &bar0->tx_w_round_robin_0);
1240 val64 = 0x0001020000010001ULL;
1241 writeq(val64, &bar0->tx_w_round_robin_1);
1242 val64 = 0x0200000100010200ULL;
1243 writeq(val64, &bar0->tx_w_round_robin_2);
1244 val64 = 0x0001000102000001ULL;
1245 writeq(val64, &bar0->tx_w_round_robin_3);
1246 val64 = 0x0001020000000000ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_4);
1250 val64 = 0x0001020300010200ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_0);
1252 val64 = 0x0100000102030001ULL;
1253 writeq(val64, &bar0->tx_w_round_robin_1);
1254 val64 = 0x0200010000010203ULL;
1255 writeq(val64, &bar0->tx_w_round_robin_2);
1256 val64 = 0x0001020001000001ULL;
1257 writeq(val64, &bar0->tx_w_round_robin_3);
1258 val64 = 0x0203000100000000ULL;
1259 writeq(val64, &bar0->tx_w_round_robin_4);
1262 val64 = 0x0001000203000102ULL;
1263 writeq(val64, &bar0->tx_w_round_robin_0);
1264 val64 = 0x0001020001030004ULL;
1265 writeq(val64, &bar0->tx_w_round_robin_1);
1266 val64 = 0x0001000203000102ULL;
1267 writeq(val64, &bar0->tx_w_round_robin_2);
1268 val64 = 0x0001020001030004ULL;
1269 writeq(val64, &bar0->tx_w_round_robin_3);
1270 val64 = 0x0001000000000000ULL;
1271 writeq(val64, &bar0->tx_w_round_robin_4);
1274 val64 = 0x0001020304000102ULL;
1275 writeq(val64, &bar0->tx_w_round_robin_0);
1276 val64 = 0x0304050001020001ULL;
1277 writeq(val64, &bar0->tx_w_round_robin_1);
1278 val64 = 0x0203000100000102ULL;
1279 writeq(val64, &bar0->tx_w_round_robin_2);
1280 val64 = 0x0304000102030405ULL;
1281 writeq(val64, &bar0->tx_w_round_robin_3);
1282 val64 = 0x0001000200000000ULL;
1283 writeq(val64, &bar0->tx_w_round_robin_4);
1286 val64 = 0x0001020001020300ULL;
1287 writeq(val64, &bar0->tx_w_round_robin_0);
1288 val64 = 0x0102030400010203ULL;
1289 writeq(val64, &bar0->tx_w_round_robin_1);
1290 val64 = 0x0405060001020001ULL;
1291 writeq(val64, &bar0->tx_w_round_robin_2);
1292 val64 = 0x0304050000010200ULL;
1293 writeq(val64, &bar0->tx_w_round_robin_3);
1294 val64 = 0x0102030000000000ULL;
1295 writeq(val64, &bar0->tx_w_round_robin_4);
1298 val64 = 0x0001020300040105ULL;
1299 writeq(val64, &bar0->tx_w_round_robin_0);
1300 val64 = 0x0200030106000204ULL;
1301 writeq(val64, &bar0->tx_w_round_robin_1);
1302 val64 = 0x0103000502010007ULL;
1303 writeq(val64, &bar0->tx_w_round_robin_2);
1304 val64 = 0x0304010002060500ULL;
1305 writeq(val64, &bar0->tx_w_round_robin_3);
1306 val64 = 0x0103020400000000ULL;
1307 writeq(val64, &bar0->tx_w_round_robin_4);
1311 /* Enable all configured Tx FIFO partitions */
1312 val64 = readq(&bar0->tx_fifo_partition_0);
1313 val64 |= (TX_FIFO_PARTITION_EN);
1314 writeq(val64, &bar0->tx_fifo_partition_0);
1316 /* Filling the Rx round robin registers as per the
1317 * number of Rings and steering based on QoS.
1319 switch (config->rx_ring_num) {
1321 val64 = 0x8080808080808080ULL;
1322 writeq(val64, &bar0->rts_qos_steering);
1325 val64 = 0x0000010000010000ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_0);
1327 val64 = 0x0100000100000100ULL;
1328 writeq(val64, &bar0->rx_w_round_robin_1);
1329 val64 = 0x0001000001000001ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_2);
1331 val64 = 0x0000010000010000ULL;
1332 writeq(val64, &bar0->rx_w_round_robin_3);
1333 val64 = 0x0100000000000000ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_4);
1336 val64 = 0x8080808040404040ULL;
1337 writeq(val64, &bar0->rts_qos_steering);
1340 val64 = 0x0001000102000001ULL;
1341 writeq(val64, &bar0->rx_w_round_robin_0);
1342 val64 = 0x0001020000010001ULL;
1343 writeq(val64, &bar0->rx_w_round_robin_1);
1344 val64 = 0x0200000100010200ULL;
1345 writeq(val64, &bar0->rx_w_round_robin_2);
1346 val64 = 0x0001000102000001ULL;
1347 writeq(val64, &bar0->rx_w_round_robin_3);
1348 val64 = 0x0001020000000000ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_4);
1351 val64 = 0x8080804040402020ULL;
1352 writeq(val64, &bar0->rts_qos_steering);
1355 val64 = 0x0001020300010200ULL;
1356 writeq(val64, &bar0->rx_w_round_robin_0);
1357 val64 = 0x0100000102030001ULL;
1358 writeq(val64, &bar0->rx_w_round_robin_1);
1359 val64 = 0x0200010000010203ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_2);
1361 val64 = 0x0001020001000001ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_3);
1363 val64 = 0x0203000100000000ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_4);
1366 val64 = 0x8080404020201010ULL;
1367 writeq(val64, &bar0->rts_qos_steering);
1370 val64 = 0x0001000203000102ULL;
1371 writeq(val64, &bar0->rx_w_round_robin_0);
1372 val64 = 0x0001020001030004ULL;
1373 writeq(val64, &bar0->rx_w_round_robin_1);
1374 val64 = 0x0001000203000102ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_2);
1376 val64 = 0x0001020001030004ULL;
1377 writeq(val64, &bar0->rx_w_round_robin_3);
1378 val64 = 0x0001000000000000ULL;
1379 writeq(val64, &bar0->rx_w_round_robin_4);
1381 val64 = 0x8080404020201008ULL;
1382 writeq(val64, &bar0->rts_qos_steering);
1385 val64 = 0x0001020304000102ULL;
1386 writeq(val64, &bar0->rx_w_round_robin_0);
1387 val64 = 0x0304050001020001ULL;
1388 writeq(val64, &bar0->rx_w_round_robin_1);
1389 val64 = 0x0203000100000102ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_2);
1391 val64 = 0x0304000102030405ULL;
1392 writeq(val64, &bar0->rx_w_round_robin_3);
1393 val64 = 0x0001000200000000ULL;
1394 writeq(val64, &bar0->rx_w_round_robin_4);
1396 val64 = 0x8080404020100804ULL;
1397 writeq(val64, &bar0->rts_qos_steering);
1400 val64 = 0x0001020001020300ULL;
1401 writeq(val64, &bar0->rx_w_round_robin_0);
1402 val64 = 0x0102030400010203ULL;
1403 writeq(val64, &bar0->rx_w_round_robin_1);
1404 val64 = 0x0405060001020001ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_2);
1406 val64 = 0x0304050000010200ULL;
1407 writeq(val64, &bar0->rx_w_round_robin_3);
1408 val64 = 0x0102030000000000ULL;
1409 writeq(val64, &bar0->rx_w_round_robin_4);
1411 val64 = 0x8080402010080402ULL;
1412 writeq(val64, &bar0->rts_qos_steering);
1415 val64 = 0x0001020300040105ULL;
1416 writeq(val64, &bar0->rx_w_round_robin_0);
1417 val64 = 0x0200030106000204ULL;
1418 writeq(val64, &bar0->rx_w_round_robin_1);
1419 val64 = 0x0103000502010007ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_2);
1421 val64 = 0x0304010002060500ULL;
1422 writeq(val64, &bar0->rx_w_round_robin_3);
1423 val64 = 0x0103020400000000ULL;
1424 writeq(val64, &bar0->rx_w_round_robin_4);
1426 val64 = 0x8040201008040201ULL;
1427 writeq(val64, &bar0->rts_qos_steering);
1433 for (i = 0; i < 8; i++)
1434 writeq(val64, &bar0->rts_frm_len_n[i]);
1436 /* Set the default rts frame length for the rings configured */
1437 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1438 for (i = 0 ; i < config->rx_ring_num ; i++)
1439 writeq(val64, &bar0->rts_frm_len_n[i]);
1441 /* Set the frame length for the configured rings
1442 * desired by the user
1444 for (i = 0; i < config->rx_ring_num; i++) {
1445 /* If rts_frm_len[i] == 0 then it is assumed that user not
1446 * specified frame length steering.
1447 * If the user provides the frame length then program
1448 * the rts_frm_len register for those values or else
1449 * leave it as it is.
1451 if (rts_frm_len[i] != 0) {
1452 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1453 &bar0->rts_frm_len_n[i]);
1457 /* Disable differentiated services steering logic */
1458 for (i = 0; i < 64; i++) {
1459 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1460 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1462 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1467 /* Program statistics memory */
1468 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1470 if (nic->device_type == XFRAME_II_DEVICE) {
1471 val64 = STAT_BC(0x320);
1472 writeq(val64, &bar0->stat_byte_cnt);
1476 * Initializing the sampling rate for the device to calculate the
1477 * bandwidth utilization.
1479 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1480 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1481 writeq(val64, &bar0->mac_link_util);
1485 * Initializing the Transmit and Receive Traffic Interrupt
1489 * TTI Initialization. Default Tx timer gets us about
1490 * 250 interrupts per sec. Continuous interrupts are enabled
1493 if (nic->device_type == XFRAME_II_DEVICE) {
1494 int count = (nic->config.bus_speed * 125)/2;
1495 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1498 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1500 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1501 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1502 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1503 if (use_continuous_tx_intrs)
1504 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1505 writeq(val64, &bar0->tti_data1_mem);
1507 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1508 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1509 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1510 writeq(val64, &bar0->tti_data2_mem);
1512 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1513 writeq(val64, &bar0->tti_command_mem);
1516 * Once the operation completes, the Strobe bit of the command
1517 * register will be reset. We poll for this particular condition
1518 * We wait for a maximum of 500ms for the operation to complete,
1519 * if it's not complete by then we return error.
1523 val64 = readq(&bar0->tti_command_mem);
1524 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1528 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1536 if (nic->config.bimodal) {
1538 for (k = 0; k < config->rx_ring_num; k++) {
1539 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1540 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1541 writeq(val64, &bar0->tti_command_mem);
1544 * Once the operation completes, the Strobe bit of the command
1545 * register will be reset. We poll for this particular condition
1546 * We wait for a maximum of 500ms for the operation to complete,
1547 * if it's not complete by then we return error.
1551 val64 = readq(&bar0->tti_command_mem);
1552 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1557 "%s: TTI init Failed\n",
1567 /* RTI Initialization */
1568 if (nic->device_type == XFRAME_II_DEVICE) {
1570 * Programmed to generate Apprx 500 Intrs per
1573 int count = (nic->config.bus_speed * 125)/4;
1574 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1576 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1578 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1579 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1580 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1582 writeq(val64, &bar0->rti_data1_mem);
1584 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1585 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1586 if (nic->intr_type == MSI_X)
1587 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1588 RTI_DATA2_MEM_RX_UFC_D(0x40));
1590 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1591 RTI_DATA2_MEM_RX_UFC_D(0x80));
1592 writeq(val64, &bar0->rti_data2_mem);
1594 for (i = 0; i < config->rx_ring_num; i++) {
1595 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1596 | RTI_CMD_MEM_OFFSET(i);
1597 writeq(val64, &bar0->rti_command_mem);
1600 * Once the operation completes, the Strobe bit of the
1601 * command register will be reset. We poll for this
1602 * particular condition. We wait for a maximum of 500ms
1603 * for the operation to complete, if it's not complete
1604 * by then we return error.
1608 val64 = readq(&bar0->rti_command_mem);
1609 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1613 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1624 * Initializing proper values as Pause threshold into all
1625 * the 8 Queues on Rx side.
1627 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1628 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1630 /* Disable RMAC PAD STRIPPING */
1631 add = &bar0->mac_cfg;
1632 val64 = readq(&bar0->mac_cfg);
1633 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1634 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1635 writel((u32) (val64), add);
1636 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1637 writel((u32) (val64 >> 32), (add + 4));
1638 val64 = readq(&bar0->mac_cfg);
1640 /* Enable FCS stripping by adapter */
1641 add = &bar0->mac_cfg;
1642 val64 = readq(&bar0->mac_cfg);
1643 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1644 if (nic->device_type == XFRAME_II_DEVICE)
1645 writeq(val64, &bar0->mac_cfg);
1647 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1648 writel((u32) (val64), add);
1649 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1650 writel((u32) (val64 >> 32), (add + 4));
1654 * Set the time value to be inserted in the pause frame
1655 * generated by xena.
1657 val64 = readq(&bar0->rmac_pause_cfg);
1658 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1659 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1660 writeq(val64, &bar0->rmac_pause_cfg);
1663 * Set the Threshold Limit for Generating the pause frame
1664 * If the amount of data in any Queue exceeds ratio of
1665 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1666 * pause frame is generated
1669 for (i = 0; i < 4; i++) {
1671 (((u64) 0xFF00 | nic->mac_control.
1672 mc_pause_threshold_q0q3)
1675 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1678 for (i = 0; i < 4; i++) {
1680 (((u64) 0xFF00 | nic->mac_control.
1681 mc_pause_threshold_q4q7)
1684 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1687 * TxDMA will stop Read request if the number of read split has
1688 * exceeded the limit pointed by shared_splits
1690 val64 = readq(&bar0->pic_control);
1691 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1692 writeq(val64, &bar0->pic_control);
1694 if (nic->config.bus_speed == 266) {
1695 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1696 writeq(0x0, &bar0->read_retry_delay);
1697 writeq(0x0, &bar0->write_retry_delay);
1701 * Programming the Herc to split every write transaction
1702 * that does not start on an ADB to reduce disconnects.
1704 if (nic->device_type == XFRAME_II_DEVICE) {
1705 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1706 MISC_LINK_STABILITY_PRD(3);
1707 writeq(val64, &bar0->misc_control);
1708 val64 = readq(&bar0->pic_control2);
1709 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1710 writeq(val64, &bar0->pic_control2);
1712 if (strstr(nic->product_name, "CX4")) {
1713 val64 = TMAC_AVG_IPG(0x17);
1714 writeq(val64, &bar0->tmac_avg_ipg);
1719 #define LINK_UP_DOWN_INTERRUPT 1
1720 #define MAC_RMAC_ERR_TIMER 2
1722 static int s2io_link_fault_indication(struct s2io_nic *nic)
1724 if (nic->intr_type != INTA)
1725 return MAC_RMAC_ERR_TIMER;
1726 if (nic->device_type == XFRAME_II_DEVICE)
1727 return LINK_UP_DOWN_INTERRUPT;
1729 return MAC_RMAC_ERR_TIMER;
1733 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1734 * @nic: device private variable,
1735 * @mask: A mask indicating which Intr block must be modified and,
1736 * @flag: A flag indicating whether to enable or disable the Intrs.
1737 * Description: This function will either disable or enable the interrupts
1738 * depending on the flag argument. The mask argument can be used to
1739 * enable/disable any Intr block.
1740 * Return Value: NONE.
1743 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1745 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1746 register u64 val64 = 0, temp64 = 0;
1748 /* Top level interrupt classification */
1749 /* PIC Interrupts */
1750 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1751 /* Enable PIC Intrs in the general intr mask register */
1752 val64 = TXPIC_INT_M;
1753 if (flag == ENABLE_INTRS) {
1754 temp64 = readq(&bar0->general_int_mask);
1755 temp64 &= ~((u64) val64);
1756 writeq(temp64, &bar0->general_int_mask);
1758 * If Hercules adapter enable GPIO otherwise
1759 * disable all PCIX, Flash, MDIO, IIC and GPIO
1760 * interrupts for now.
1763 if (s2io_link_fault_indication(nic) ==
1764 LINK_UP_DOWN_INTERRUPT ) {
1765 temp64 = readq(&bar0->pic_int_mask);
1766 temp64 &= ~((u64) PIC_INT_GPIO);
1767 writeq(temp64, &bar0->pic_int_mask);
1768 temp64 = readq(&bar0->gpio_int_mask);
1769 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1770 writeq(temp64, &bar0->gpio_int_mask);
1772 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1775 * No MSI Support is available presently, so TTI and
1776 * RTI interrupts are also disabled.
1778 } else if (flag == DISABLE_INTRS) {
1780 * Disable PIC Intrs in the general
1781 * intr mask register
1783 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1784 temp64 = readq(&bar0->general_int_mask);
1786 writeq(val64, &bar0->general_int_mask);
1790 /* MAC Interrupts */
1791 /* Enabling/Disabling MAC interrupts */
1792 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1793 val64 = TXMAC_INT_M | RXMAC_INT_M;
1794 if (flag == ENABLE_INTRS) {
1795 temp64 = readq(&bar0->general_int_mask);
1796 temp64 &= ~((u64) val64);
1797 writeq(temp64, &bar0->general_int_mask);
1799 * All MAC block error interrupts are disabled for now
1802 } else if (flag == DISABLE_INTRS) {
1804 * Disable MAC Intrs in the general intr mask register
1806 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1807 writeq(DISABLE_ALL_INTRS,
1808 &bar0->mac_rmac_err_mask);
1810 temp64 = readq(&bar0->general_int_mask);
1812 writeq(val64, &bar0->general_int_mask);
1816 /* Tx traffic interrupts */
1817 if (mask & TX_TRAFFIC_INTR) {
1818 val64 = TXTRAFFIC_INT_M;
1819 if (flag == ENABLE_INTRS) {
1820 temp64 = readq(&bar0->general_int_mask);
1821 temp64 &= ~((u64) val64);
1822 writeq(temp64, &bar0->general_int_mask);
1824 * Enable all the Tx side interrupts
1825 * writing 0 Enables all 64 TX interrupt levels
1827 writeq(0x0, &bar0->tx_traffic_mask);
1828 } else if (flag == DISABLE_INTRS) {
1830 * Disable Tx Traffic Intrs in the general intr mask
1833 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1834 temp64 = readq(&bar0->general_int_mask);
1836 writeq(val64, &bar0->general_int_mask);
1840 /* Rx traffic interrupts */
1841 if (mask & RX_TRAFFIC_INTR) {
1842 val64 = RXTRAFFIC_INT_M;
1843 if (flag == ENABLE_INTRS) {
1844 temp64 = readq(&bar0->general_int_mask);
1845 temp64 &= ~((u64) val64);
1846 writeq(temp64, &bar0->general_int_mask);
1847 /* writing 0 Enables all 8 RX interrupt levels */
1848 writeq(0x0, &bar0->rx_traffic_mask);
1849 } else if (flag == DISABLE_INTRS) {
1851 * Disable Rx Traffic Intrs in the general intr mask
1854 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1855 temp64 = readq(&bar0->general_int_mask);
1857 writeq(val64, &bar0->general_int_mask);
1863 * verify_pcc_quiescent- Checks for PCC quiescent state
1864 * Return: 1 If PCC is quiescence
1865 * 0 If PCC is not quiescence
1867 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1870 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1871 u64 val64 = readq(&bar0->adapter_status);
1873 herc = (sp->device_type == XFRAME_II_DEVICE);
1875 if (flag == FALSE) {
1876 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1877 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1880 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1884 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
1885 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1886 ADAPTER_STATUS_RMAC_PCC_IDLE))
1889 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1890 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1898 * verify_xena_quiescence - Checks whether the H/W is ready
1899 * Description: Returns whether the H/W is ready to go or not. Depending
1900 * on whether adapter enable bit was written or not the comparison
1901 * differs and the calling function passes the input argument flag to
1903 * Return: 1 If xena is quiescence
1904 * 0 If Xena is not quiescence
1907 static int verify_xena_quiescence(struct s2io_nic *sp)
1910 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1911 u64 val64 = readq(&bar0->adapter_status);
1912 mode = s2io_verify_pci_mode(sp);
1914 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1915 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1918 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1919 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1922 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1923 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1926 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1927 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1930 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1931 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1934 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1935 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1938 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1939 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1942 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1943 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1948 * In PCI 33 mode, the P_PLL is not used, and therefore,
1949 * the the P_PLL_LOCK bit in the adapter_status register will
1952 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1953 sp->device_type == XFRAME_II_DEVICE && mode !=
1955 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1958 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1959 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1960 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1967 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1968 * @sp: Pointer to device specifc structure
1970 * New procedure to clear mac address reading problems on Alpha platforms
1974 static void fix_mac_address(struct s2io_nic * sp)
1976 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1980 while (fix_mac[i] != END_SIGN) {
1981 writeq(fix_mac[i++], &bar0->gpio_control);
1983 val64 = readq(&bar0->gpio_control);
1988 * start_nic - Turns the device on
1989 * @nic : device private variable.
1991 * This function actually turns the device on. Before this function is
1992 * called,all Registers are configured from their reset states
1993 * and shared memory is allocated but the NIC is still quiescent. On
1994 * calling this function, the device interrupts are cleared and the NIC is
1995 * literally switched on by writing into the adapter control register.
1997 * SUCCESS on success and -1 on failure.
2000 static int start_nic(struct s2io_nic *nic)
2002 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2003 struct net_device *dev = nic->dev;
2004 register u64 val64 = 0;
2006 struct mac_info *mac_control;
2007 struct config_param *config;
2009 mac_control = &nic->mac_control;
2010 config = &nic->config;
2012 /* PRC Initialization and configuration */
2013 for (i = 0; i < config->rx_ring_num; i++) {
2014 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2015 &bar0->prc_rxd0_n[i]);
2017 val64 = readq(&bar0->prc_ctrl_n[i]);
2018 if (nic->config.bimodal)
2019 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2020 if (nic->rxd_mode == RXD_MODE_1)
2021 val64 |= PRC_CTRL_RC_ENABLED;
2023 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2024 if (nic->device_type == XFRAME_II_DEVICE)
2025 val64 |= PRC_CTRL_GROUP_READS;
2026 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2027 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2028 writeq(val64, &bar0->prc_ctrl_n[i]);
2031 if (nic->rxd_mode == RXD_MODE_3B) {
2032 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2033 val64 = readq(&bar0->rx_pa_cfg);
2034 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2035 writeq(val64, &bar0->rx_pa_cfg);
2038 if (vlan_tag_strip == 0) {
2039 val64 = readq(&bar0->rx_pa_cfg);
2040 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2041 writeq(val64, &bar0->rx_pa_cfg);
2042 vlan_strip_flag = 0;
2046 * Enabling MC-RLDRAM. After enabling the device, we timeout
2047 * for around 100ms, which is approximately the time required
2048 * for the device to be ready for operation.
2050 val64 = readq(&bar0->mc_rldram_mrs);
2051 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2052 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2053 val64 = readq(&bar0->mc_rldram_mrs);
2055 msleep(100); /* Delay by around 100 ms. */
2057 /* Enabling ECC Protection. */
2058 val64 = readq(&bar0->adapter_control);
2059 val64 &= ~ADAPTER_ECC_EN;
2060 writeq(val64, &bar0->adapter_control);
2063 * Clearing any possible Link state change interrupts that
2064 * could have popped up just before Enabling the card.
2066 val64 = readq(&bar0->mac_rmac_err_reg);
2068 writeq(val64, &bar0->mac_rmac_err_reg);
2071 * Verify if the device is ready to be enabled, if so enable
2074 val64 = readq(&bar0->adapter_status);
2075 if (!verify_xena_quiescence(nic)) {
2076 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2077 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2078 (unsigned long long) val64);
2083 * With some switches, link might be already up at this point.
2084 * Because of this weird behavior, when we enable laser,
2085 * we may not get link. We need to handle this. We cannot
2086 * figure out which switch is misbehaving. So we are forced to
2087 * make a global change.
2090 /* Enabling Laser. */
2091 val64 = readq(&bar0->adapter_control);
2092 val64 |= ADAPTER_EOI_TX_ON;
2093 writeq(val64, &bar0->adapter_control);
2095 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2097 * Dont see link state interrupts initally on some switches,
2098 * so directly scheduling the link state task here.
2100 schedule_work(&nic->set_link_task);
2102 /* SXE-002: Initialize link and activity LED */
2103 subid = nic->pdev->subsystem_device;
2104 if (((subid & 0xFF) >= 0x07) &&
2105 (nic->device_type == XFRAME_I_DEVICE)) {
2106 val64 = readq(&bar0->gpio_control);
2107 val64 |= 0x0000800000000000ULL;
2108 writeq(val64, &bar0->gpio_control);
2109 val64 = 0x0411040400000000ULL;
2110 writeq(val64, (void __iomem *)bar0 + 0x2700);
2116 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2118 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2119 TxD *txdlp, int get_off)
2121 struct s2io_nic *nic = fifo_data->nic;
2122 struct sk_buff *skb;
2127 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2128 pci_unmap_single(nic->pdev, (dma_addr_t)
2129 txds->Buffer_Pointer, sizeof(u64),
2134 skb = (struct sk_buff *) ((unsigned long)
2135 txds->Host_Control);
2137 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2140 pci_unmap_single(nic->pdev, (dma_addr_t)
2141 txds->Buffer_Pointer,
2142 skb->len - skb->data_len,
2144 frg_cnt = skb_shinfo(skb)->nr_frags;
2147 for (j = 0; j < frg_cnt; j++, txds++) {
2148 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2149 if (!txds->Buffer_Pointer)
2151 pci_unmap_page(nic->pdev, (dma_addr_t)
2152 txds->Buffer_Pointer,
2153 frag->size, PCI_DMA_TODEVICE);
2156 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2161 * free_tx_buffers - Free all queued Tx buffers
2162 * @nic : device private variable.
2164 * Free all queued Tx buffers.
2165 * Return Value: void
2168 static void free_tx_buffers(struct s2io_nic *nic)
2170 struct net_device *dev = nic->dev;
2171 struct sk_buff *skb;
2174 struct mac_info *mac_control;
2175 struct config_param *config;
2178 mac_control = &nic->mac_control;
2179 config = &nic->config;
2181 for (i = 0; i < config->tx_fifo_num; i++) {
2182 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2183 txdp = (struct TxD *) \
2184 mac_control->fifos[i].list_info[j].list_virt_addr;
2185 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2187 nic->mac_control.stats_info->sw_stat.mem_freed
2194 "%s:forcibly freeing %d skbs on FIFO%d\n",
2196 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2197 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2202 * stop_nic - To stop the nic
2203 * @nic ; device private variable.
2205 * This function does exactly the opposite of what the start_nic()
2206 * function does. This function is called to stop the device.
2211 static void stop_nic(struct s2io_nic *nic)
2213 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2214 register u64 val64 = 0;
2216 struct mac_info *mac_control;
2217 struct config_param *config;
2219 mac_control = &nic->mac_control;
2220 config = &nic->config;
2222 /* Disable all interrupts */
2223 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2224 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2225 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2226 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2228 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2229 val64 = readq(&bar0->adapter_control);
2230 val64 &= ~(ADAPTER_CNTL_EN);
2231 writeq(val64, &bar0->adapter_control);
2234 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2237 struct net_device *dev = nic->dev;
2238 struct sk_buff *frag_list;
2241 /* Buffer-1 receives L3/L4 headers */
2242 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2243 (nic->pdev, skb->data, l3l4hdr_size + 4,
2244 PCI_DMA_FROMDEVICE);
2246 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2247 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2248 if (skb_shinfo(skb)->frag_list == NULL) {
2249 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
2250 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2253 frag_list = skb_shinfo(skb)->frag_list;
2254 skb->truesize += frag_list->truesize;
2255 nic->mac_control.stats_info->sw_stat.mem_allocated
2256 += frag_list->truesize;
2257 frag_list->next = NULL;
2258 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2259 frag_list->data = tmp;
2260 skb_reset_tail_pointer(frag_list);
2262 /* Buffer-2 receives L4 data payload */
2263 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2264 frag_list->data, dev->mtu,
2265 PCI_DMA_FROMDEVICE);
2266 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2267 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2273 * fill_rx_buffers - Allocates the Rx side skbs
2274 * @nic: device private variable
2275 * @ring_no: ring number
2277 * The function allocates Rx side skbs and puts the physical
2278 * address of these buffers into the RxD buffer pointers, so that the NIC
2279 * can DMA the received frame into these locations.
2280 * The NIC supports 3 receive modes, viz
2282 * 2. three buffer and
2283 * 3. Five buffer modes.
2284 * Each mode defines how many fragments the received frame will be split
2285 * up into by the NIC. The frame is split into L3 header, L4 Header,
2286 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2287 * is split into 3 fragments. As of now only single buffer mode is
2290 * SUCCESS on success or an appropriate -ve value on failure.
2293 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2295 struct net_device *dev = nic->dev;
2296 struct sk_buff *skb;
2298 int off, off1, size, block_no, block_no1;
2301 struct mac_info *mac_control;
2302 struct config_param *config;
2305 unsigned long flags;
2306 struct RxD_t *first_rxdp = NULL;
2307 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2309 mac_control = &nic->mac_control;
2310 config = &nic->config;
2311 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2312 atomic_read(&nic->rx_bufs_left[ring_no]);
2314 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2315 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2316 while (alloc_tab < alloc_cnt) {
2317 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2319 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2321 rxdp = mac_control->rings[ring_no].
2322 rx_blocks[block_no].rxds[off].virt_addr;
2324 if ((block_no == block_no1) && (off == off1) &&
2325 (rxdp->Host_Control)) {
2326 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2328 DBG_PRINT(INTR_DBG, " info equated\n");
2331 if (off && (off == rxd_count[nic->rxd_mode])) {
2332 mac_control->rings[ring_no].rx_curr_put_info.
2334 if (mac_control->rings[ring_no].rx_curr_put_info.
2335 block_index == mac_control->rings[ring_no].
2337 mac_control->rings[ring_no].rx_curr_put_info.
2339 block_no = mac_control->rings[ring_no].
2340 rx_curr_put_info.block_index;
2341 if (off == rxd_count[nic->rxd_mode])
2343 mac_control->rings[ring_no].rx_curr_put_info.
2345 rxdp = mac_control->rings[ring_no].
2346 rx_blocks[block_no].block_virt_addr;
2347 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2351 spin_lock_irqsave(&nic->put_lock, flags);
2352 mac_control->rings[ring_no].put_pos =
2353 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2354 spin_unlock_irqrestore(&nic->put_lock, flags);
2356 mac_control->rings[ring_no].put_pos =
2357 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2359 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2360 ((nic->rxd_mode >= RXD_MODE_3A) &&
2361 (rxdp->Control_2 & BIT(0)))) {
2362 mac_control->rings[ring_no].rx_curr_put_info.
2366 /* calculate size of skb based on ring mode */
2367 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2368 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2369 if (nic->rxd_mode == RXD_MODE_1)
2370 size += NET_IP_ALIGN;
2371 else if (nic->rxd_mode == RXD_MODE_3B)
2372 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2374 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2377 skb = dev_alloc_skb(size);
2379 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2380 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2383 first_rxdp->Control_1 |= RXD_OWN_XENA;
2385 nic->mac_control.stats_info->sw_stat. \
2386 mem_alloc_fail_cnt++;
2389 nic->mac_control.stats_info->sw_stat.mem_allocated
2391 if (nic->rxd_mode == RXD_MODE_1) {
2392 /* 1 buffer mode - normal operation mode */
2393 memset(rxdp, 0, sizeof(struct RxD1));
2394 skb_reserve(skb, NET_IP_ALIGN);
2395 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2396 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2397 PCI_DMA_FROMDEVICE);
2399 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2401 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2403 * 2 or 3 buffer mode -
2404 * Both 2 buffer mode and 3 buffer mode provides 128
2405 * byte aligned receive buffers.
2407 * 3 buffer mode provides header separation where in
2408 * skb->data will have L3/L4 headers where as
2409 * skb_shinfo(skb)->frag_list will have the L4 data
2413 /* save buffer pointers to avoid frequent dma mapping */
2414 Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
2415 Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
2416 memset(rxdp, 0, sizeof(struct RxD3));
2417 /* restore the buffer pointers for dma sync*/
2418 ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
2419 ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;
2421 ba = &mac_control->rings[ring_no].ba[block_no][off];
2422 skb_reserve(skb, BUF0_LEN);
2423 tmp = (u64)(unsigned long) skb->data;
2426 skb->data = (void *) (unsigned long)tmp;
2427 skb_reset_tail_pointer(skb);
2429 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2430 ((struct RxD3*)rxdp)->Buffer0_ptr =
2431 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2432 PCI_DMA_FROMDEVICE);
2434 pci_dma_sync_single_for_device(nic->pdev,
2435 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2436 BUF0_LEN, PCI_DMA_FROMDEVICE);
2437 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2438 if (nic->rxd_mode == RXD_MODE_3B) {
2439 /* Two buffer mode */
2442 * Buffer2 will have L3/L4 header plus
2445 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2446 (nic->pdev, skb->data, dev->mtu + 4,
2447 PCI_DMA_FROMDEVICE);
2449 /* Buffer-1 will be dummy buffer. Not used */
2450 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2451 ((struct RxD3*)rxdp)->Buffer1_ptr =
2452 pci_map_single(nic->pdev,
2454 PCI_DMA_FROMDEVICE);
2456 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2457 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2461 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2462 nic->mac_control.stats_info->sw_stat.\
2463 mem_freed += skb->truesize;
2464 dev_kfree_skb_irq(skb);
2467 first_rxdp->Control_1 |=
2473 rxdp->Control_2 |= BIT(0);
2475 rxdp->Host_Control = (unsigned long) (skb);
2476 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2477 rxdp->Control_1 |= RXD_OWN_XENA;
2479 if (off == (rxd_count[nic->rxd_mode] + 1))
2481 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2483 rxdp->Control_2 |= SET_RXD_MARKER;
2484 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2487 first_rxdp->Control_1 |= RXD_OWN_XENA;
2491 atomic_inc(&nic->rx_bufs_left[ring_no]);
2496 /* Transfer ownership of first descriptor to adapter just before
2497 * exiting. Before that, use memory barrier so that ownership
2498 * and other fields are seen by adapter correctly.
2502 first_rxdp->Control_1 |= RXD_OWN_XENA;
2508 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2510 struct net_device *dev = sp->dev;
2512 struct sk_buff *skb;
2514 struct mac_info *mac_control;
2517 mac_control = &sp->mac_control;
2518 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2519 rxdp = mac_control->rings[ring_no].
2520 rx_blocks[blk].rxds[j].virt_addr;
2521 skb = (struct sk_buff *)
2522 ((unsigned long) rxdp->Host_Control);
2526 if (sp->rxd_mode == RXD_MODE_1) {
2527 pci_unmap_single(sp->pdev, (dma_addr_t)
2528 ((struct RxD1*)rxdp)->Buffer0_ptr,
2530 HEADER_ETHERNET_II_802_3_SIZE
2531 + HEADER_802_2_SIZE +
2533 PCI_DMA_FROMDEVICE);
2534 memset(rxdp, 0, sizeof(struct RxD1));
2535 } else if(sp->rxd_mode == RXD_MODE_3B) {
2536 ba = &mac_control->rings[ring_no].
2538 pci_unmap_single(sp->pdev, (dma_addr_t)
2539 ((struct RxD3*)rxdp)->Buffer0_ptr,
2541 PCI_DMA_FROMDEVICE);
2542 pci_unmap_single(sp->pdev, (dma_addr_t)
2543 ((struct RxD3*)rxdp)->Buffer1_ptr,
2545 PCI_DMA_FROMDEVICE);
2546 pci_unmap_single(sp->pdev, (dma_addr_t)
2547 ((struct RxD3*)rxdp)->Buffer2_ptr,
2549 PCI_DMA_FROMDEVICE);
2550 memset(rxdp, 0, sizeof(struct RxD3));
2552 pci_unmap_single(sp->pdev, (dma_addr_t)
2553 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2554 PCI_DMA_FROMDEVICE);
2555 pci_unmap_single(sp->pdev, (dma_addr_t)
2556 ((struct RxD3*)rxdp)->Buffer1_ptr,
2558 PCI_DMA_FROMDEVICE);
2559 pci_unmap_single(sp->pdev, (dma_addr_t)
2560 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2561 PCI_DMA_FROMDEVICE);
2562 memset(rxdp, 0, sizeof(struct RxD3));
2564 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2566 atomic_dec(&sp->rx_bufs_left[ring_no]);
2571 * free_rx_buffers - Frees all Rx buffers
2572 * @sp: device private variable.
2574 * This function will free all Rx buffers allocated by host.
2579 static void free_rx_buffers(struct s2io_nic *sp)
2581 struct net_device *dev = sp->dev;
2582 int i, blk = 0, buf_cnt = 0;
2583 struct mac_info *mac_control;
2584 struct config_param *config;
2586 mac_control = &sp->mac_control;
2587 config = &sp->config;
2589 for (i = 0; i < config->rx_ring_num; i++) {
2590 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2591 free_rxd_blk(sp,i,blk);
2593 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2594 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2595 mac_control->rings[i].rx_curr_put_info.offset = 0;
2596 mac_control->rings[i].rx_curr_get_info.offset = 0;
2597 atomic_set(&sp->rx_bufs_left[i], 0);
2598 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2599 dev->name, buf_cnt, i);
2604 * s2io_poll - Rx interrupt handler for NAPI support
2605 * @dev : pointer to the device structure.
2606 * @budget : The number of packets that were budgeted to be processed
2607 * during one pass through the 'Poll" function.
2609 * Comes into picture only if NAPI support has been incorporated. It does
2610 * the same thing that rx_intr_handler does, but not in a interrupt context
2611 * also It will process only a given number of packets.
2613 * 0 on success and 1 if there are No Rx packets to be processed.
2616 static int s2io_poll(struct net_device *dev, int *budget)
2618 struct s2io_nic *nic = dev->priv;
2619 int pkt_cnt = 0, org_pkts_to_process;
2620 struct mac_info *mac_control;
2621 struct config_param *config;
2622 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2625 atomic_inc(&nic->isr_cnt);
2626 mac_control = &nic->mac_control;
2627 config = &nic->config;
2629 nic->pkts_to_process = *budget;
2630 if (nic->pkts_to_process > dev->quota)
2631 nic->pkts_to_process = dev->quota;
2632 org_pkts_to_process = nic->pkts_to_process;
2634 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2635 readl(&bar0->rx_traffic_int);
2637 for (i = 0; i < config->rx_ring_num; i++) {
2638 rx_intr_handler(&mac_control->rings[i]);
2639 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2640 if (!nic->pkts_to_process) {
2641 /* Quota for the current iteration has been met */
2648 dev->quota -= pkt_cnt;
2650 netif_rx_complete(dev);
2652 for (i = 0; i < config->rx_ring_num; i++) {
2653 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2654 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2655 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2659 /* Re enable the Rx interrupts. */
2660 writeq(0x0, &bar0->rx_traffic_mask);
2661 readl(&bar0->rx_traffic_mask);
2662 atomic_dec(&nic->isr_cnt);
2666 dev->quota -= pkt_cnt;
2669 for (i = 0; i < config->rx_ring_num; i++) {
2670 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2671 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2672 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2676 atomic_dec(&nic->isr_cnt);
2680 #ifdef CONFIG_NET_POLL_CONTROLLER
2682 * s2io_netpoll - netpoll event handler entry point
2683 * @dev : pointer to the device structure.
2685 * This function will be called by upper layer to check for events on the
2686 * interface in situations where interrupts are disabled. It is used for
2687 * specific in-kernel networking tasks, such as remote consoles and kernel
2688 * debugging over the network (example netdump in RedHat).
2690 static void s2io_netpoll(struct net_device *dev)
2692 struct s2io_nic *nic = dev->priv;
2693 struct mac_info *mac_control;
2694 struct config_param *config;
2695 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2696 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2699 if (pci_channel_offline(nic->pdev))
2702 disable_irq(dev->irq);
2704 atomic_inc(&nic->isr_cnt);
2705 mac_control = &nic->mac_control;
2706 config = &nic->config;
2708 writeq(val64, &bar0->rx_traffic_int);
2709 writeq(val64, &bar0->tx_traffic_int);
2711 /* we need to free up the transmitted skbufs or else netpoll will
2712 * run out of skbs and will fail and eventually netpoll application such
2713 * as netdump will fail.
2715 for (i = 0; i < config->tx_fifo_num; i++)
2716 tx_intr_handler(&mac_control->fifos[i]);
2718 /* check for received packet and indicate up to network */
2719 for (i = 0; i < config->rx_ring_num; i++)
2720 rx_intr_handler(&mac_control->rings[i]);
2722 for (i = 0; i < config->rx_ring_num; i++) {
2723 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2724 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2725 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2729 atomic_dec(&nic->isr_cnt);
2730 enable_irq(dev->irq);
2736 * rx_intr_handler - Rx interrupt handler
2737 * @nic: device private variable.
2739 * If the interrupt is because of a received frame or if the
2740 * receive ring contains fresh as yet un-processed frames,this function is
2741 * called. It picks out the RxD at which place the last Rx processing had
2742 * stopped and sends the skb to the OSM's Rx handler and then increments
2747 static void rx_intr_handler(struct ring_info *ring_data)
2749 struct s2io_nic *nic = ring_data->nic;
2750 struct net_device *dev = (struct net_device *) nic->dev;
2751 int get_block, put_block, put_offset;
2752 struct rx_curr_get_info get_info, put_info;
2754 struct sk_buff *skb;
2758 spin_lock(&nic->rx_lock);
2759 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2760 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2761 __FUNCTION__, dev->name);
2762 spin_unlock(&nic->rx_lock);
2766 get_info = ring_data->rx_curr_get_info;
2767 get_block = get_info.block_index;
2768 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2769 put_block = put_info.block_index;
2770 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2772 spin_lock(&nic->put_lock);
2773 put_offset = ring_data->put_pos;
2774 spin_unlock(&nic->put_lock);
2776 put_offset = ring_data->put_pos;
2778 while (RXD_IS_UP2DT(rxdp)) {
2780 * If your are next to put index then it's
2781 * FIFO full condition
2783 if ((get_block == put_block) &&
2784 (get_info.offset + 1) == put_info.offset) {
2785 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2788 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2790 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2792 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2793 spin_unlock(&nic->rx_lock);
2796 if (nic->rxd_mode == RXD_MODE_1) {
2797 pci_unmap_single(nic->pdev, (dma_addr_t)
2798 ((struct RxD1*)rxdp)->Buffer0_ptr,
2800 HEADER_ETHERNET_II_802_3_SIZE +
2803 PCI_DMA_FROMDEVICE);
2804 } else if (nic->rxd_mode == RXD_MODE_3B) {
2805 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2806 ((struct RxD3*)rxdp)->Buffer0_ptr,
2807 BUF0_LEN, PCI_DMA_FROMDEVICE);
2808 pci_unmap_single(nic->pdev, (dma_addr_t)
2809 ((struct RxD3*)rxdp)->Buffer2_ptr,
2811 PCI_DMA_FROMDEVICE);
2813 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2814 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2815 PCI_DMA_FROMDEVICE);
2816 pci_unmap_single(nic->pdev, (dma_addr_t)
2817 ((struct RxD3*)rxdp)->Buffer1_ptr,
2819 PCI_DMA_FROMDEVICE);
2820 pci_unmap_single(nic->pdev, (dma_addr_t)
2821 ((struct RxD3*)rxdp)->Buffer2_ptr,
2822 dev->mtu, PCI_DMA_FROMDEVICE);
2824 prefetch(skb->data);
2825 rx_osm_handler(ring_data, rxdp);
2827 ring_data->rx_curr_get_info.offset = get_info.offset;
2828 rxdp = ring_data->rx_blocks[get_block].
2829 rxds[get_info.offset].virt_addr;
2830 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2831 get_info.offset = 0;
2832 ring_data->rx_curr_get_info.offset = get_info.offset;
2834 if (get_block == ring_data->block_count)
2836 ring_data->rx_curr_get_info.block_index = get_block;
2837 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2840 nic->pkts_to_process -= 1;
2841 if ((napi) && (!nic->pkts_to_process))
2844 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2848 /* Clear all LRO sessions before exiting */
2849 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2850 struct lro *lro = &nic->lro0_n[i];
2852 update_L3L4_header(nic, lro);
2853 queue_rx_frame(lro->parent);
2854 clear_lro_session(lro);
2859 spin_unlock(&nic->rx_lock);
2863 * tx_intr_handler - Transmit interrupt handler
2864 * @nic : device private variable
2866 * If an interrupt was raised to indicate DMA complete of the
2867 * Tx packet, this function is called. It identifies the last TxD
2868 * whose buffer was freed and frees all skbs whose data have already
2869 * DMA'ed into the NICs internal memory.
2874 static void tx_intr_handler(struct fifo_info *fifo_data)
2876 struct s2io_nic *nic = fifo_data->nic;
2877 struct net_device *dev = (struct net_device *) nic->dev;
2878 struct tx_curr_get_info get_info, put_info;
2879 struct sk_buff *skb;
2883 get_info = fifo_data->tx_curr_get_info;
2884 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2885 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2887 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2888 (get_info.offset != put_info.offset) &&
2889 (txdlp->Host_Control)) {
2890 /* Check for TxD errors */
2891 if (txdlp->Control_1 & TXD_T_CODE) {
2892 unsigned long long err;
2893 err = txdlp->Control_1 & TXD_T_CODE;
2895 nic->mac_control.stats_info->sw_stat.
2899 /* update t_code statistics */
2900 err_mask = err >> 48;
2903 nic->mac_control.stats_info->sw_stat.
2908 nic->mac_control.stats_info->sw_stat.
2909 tx_desc_abort_cnt++;
2913 nic->mac_control.stats_info->sw_stat.
2914 tx_parity_err_cnt++;
2918 nic->mac_control.stats_info->sw_stat.
2923 nic->mac_control.stats_info->sw_stat.
2924 tx_list_proc_err_cnt++;
2929 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2931 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2933 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2937 /* Updating the statistics block */
2938 nic->stats.tx_bytes += skb->len;
2939 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2940 dev_kfree_skb_irq(skb);
2943 if (get_info.offset == get_info.fifo_len + 1)
2944 get_info.offset = 0;
2945 txdlp = (struct TxD *) fifo_data->list_info
2946 [get_info.offset].list_virt_addr;
2947 fifo_data->tx_curr_get_info.offset =
2951 spin_lock(&nic->tx_lock);
2952 if (netif_queue_stopped(dev))
2953 netif_wake_queue(dev);
2954 spin_unlock(&nic->tx_lock);
2958 * s2io_mdio_write - Function to write in to MDIO registers
2959 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2960 * @addr : address value
2961 * @value : data value
2962 * @dev : pointer to net_device structure
2964 * This function is used to write values to the MDIO registers
2967 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2970 struct s2io_nic *sp = dev->priv;
2971 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2973 //address transaction
2974 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2975 | MDIO_MMD_DEV_ADDR(mmd_type)
2976 | MDIO_MMS_PRT_ADDR(0x0);
2977 writeq(val64, &bar0->mdio_control);
2978 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2979 writeq(val64, &bar0->mdio_control);
2984 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2985 | MDIO_MMD_DEV_ADDR(mmd_type)
2986 | MDIO_MMS_PRT_ADDR(0x0)
2987 | MDIO_MDIO_DATA(value)
2988 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2989 writeq(val64, &bar0->mdio_control);
2990 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2991 writeq(val64, &bar0->mdio_control);
2995 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2996 | MDIO_MMD_DEV_ADDR(mmd_type)
2997 | MDIO_MMS_PRT_ADDR(0x0)
2998 | MDIO_OP(MDIO_OP_READ_TRANS);
2999 writeq(val64, &bar0->mdio_control);
3000 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3001 writeq(val64, &bar0->mdio_control);
3007 * s2io_mdio_read - Function to write in to MDIO registers
3008 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3009 * @addr : address value
3010 * @dev : pointer to net_device structure
3012 * This function is used to read values to the MDIO registers
3015 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3019 struct s2io_nic *sp = dev->priv;
3020 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3022 /* address transaction */
3023 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3024 | MDIO_MMD_DEV_ADDR(mmd_type)
3025 | MDIO_MMS_PRT_ADDR(0x0);
3026 writeq(val64, &bar0->mdio_control);
3027 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3028 writeq(val64, &bar0->mdio_control);
3031 /* Data transaction */
3033 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3034 | MDIO_MMD_DEV_ADDR(mmd_type)
3035 | MDIO_MMS_PRT_ADDR(0x0)
3036 | MDIO_OP(MDIO_OP_READ_TRANS);
3037 writeq(val64, &bar0->mdio_control);
3038 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3039 writeq(val64, &bar0->mdio_control);
3042 /* Read the value from regs */
3043 rval64 = readq(&bar0->mdio_control);
3044 rval64 = rval64 & 0xFFFF0000;
3045 rval64 = rval64 >> 16;
3049 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3050 * @counter : couter value to be updated
3051 * @flag : flag to indicate the status
3052 * @type : counter type
3054 * This function is to check the status of the xpak counters value
3058 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3063 for(i = 0; i <index; i++)
3068 *counter = *counter + 1;
3069 val64 = *regs_stat & mask;
3070 val64 = val64 >> (index * 0x2);
3077 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3078 "service. Excessive temperatures may "
3079 "result in premature transceiver "
3083 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3084 "service Excessive bias currents may "
3085 "indicate imminent laser diode "
3089 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3090 "service Excessive laser output "
3091 "power may saturate far-end "
3095 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3100 val64 = val64 << (index * 0x2);
3101 *regs_stat = (*regs_stat & (~mask)) | (val64);
3104 *regs_stat = *regs_stat & (~mask);
3109 * s2io_updt_xpak_counter - Function to update the xpak counters
3110 * @dev : pointer to net_device struct
3112 * This function is to upate the status of the xpak counters value
3115 static void s2io_updt_xpak_counter(struct net_device *dev)
3123 struct s2io_nic *sp = dev->priv;
3124 struct stat_block *stat_info = sp->mac_control.stats_info;
3126 /* Check the communication with the MDIO slave */
3129 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3130 if((val64 == 0xFFFF) || (val64 == 0x0000))
3132 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3133 "Returned %llx\n", (unsigned long long)val64);
3137 /* Check for the expecte value of 2040 at PMA address 0x0000 */
3140 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3141 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3142 (unsigned long long)val64);
3146 /* Loading the DOM register to MDIO register */
3148 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3149 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3151 /* Reading the Alarm flags */
3154 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3156 flag = CHECKBIT(val64, 0x7);
3158 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3159 &stat_info->xpak_stat.xpak_regs_stat,
3162 if(CHECKBIT(val64, 0x6))
3163 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3165 flag = CHECKBIT(val64, 0x3);
3167 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3168 &stat_info->xpak_stat.xpak_regs_stat,
3171 if(CHECKBIT(val64, 0x2))
3172 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3174 flag = CHECKBIT(val64, 0x1);
3176 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3177 &stat_info->xpak_stat.xpak_regs_stat,
3180 if(CHECKBIT(val64, 0x0))
3181 stat_info->xpak_stat.alarm_laser_output_power_low++;
3183 /* Reading the Warning flags */
3186 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3188 if(CHECKBIT(val64, 0x7))
3189 stat_info->xpak_stat.warn_transceiver_temp_high++;
3191 if(CHECKBIT(val64, 0x6))
3192 stat_info->xpak_stat.warn_transceiver_temp_low++;
3194 if(CHECKBIT(val64, 0x3))
3195 stat_info->xpak_stat.warn_laser_bias_current_high++;
3197 if(CHECKBIT(val64, 0x2))
3198 stat_info->xpak_stat.warn_laser_bias_current_low++;
3200 if(CHECKBIT(val64, 0x1))
3201 stat_info->xpak_stat.warn_laser_output_power_high++;
3203 if(CHECKBIT(val64, 0x0))
3204 stat_info->xpak_stat.warn_laser_output_power_low++;
3208 * alarm_intr_handler - Alarm Interrrupt handler
3209 * @nic: device private variable
3210 * Description: If the interrupt was neither because of Rx packet or Tx
3211 * complete, this function is called. If the interrupt was to indicate
3212 * a loss of link, the OSM link status handler is invoked for any other
3213 * alarm interrupt the block that raised the interrupt is displayed
3214 * and a H/W reset is issued.
3219 static void alarm_intr_handler(struct s2io_nic *nic)
3221 struct net_device *dev = (struct net_device *) nic->dev;
3222 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3223 register u64 val64 = 0, err_reg = 0;
3226 if (atomic_read(&nic->card_state) == CARD_DOWN)
3228 if (pci_channel_offline(nic->pdev))
3230 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3231 /* Handling the XPAK counters update */
3232 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3233 /* waiting for an hour */
3234 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3236 s2io_updt_xpak_counter(dev);
3237 /* reset the count to zero */
3238 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3241 /* Handling link status change error Intr */
3242 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3243 err_reg = readq(&bar0->mac_rmac_err_reg);
3244 writeq(err_reg, &bar0->mac_rmac_err_reg);
3245 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3246 schedule_work(&nic->set_link_task);
3250 /* Handling Ecc errors */
3251 val64 = readq(&bar0->mc_err_reg);
3252 writeq(val64, &bar0->mc_err_reg);
3253 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3254 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3255 nic->mac_control.stats_info->sw_stat.
3257 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3259 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3260 if (nic->device_type != XFRAME_II_DEVICE) {
3261 /* Reset XframeI only if critical error */
3262 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3263 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3264 netif_stop_queue(dev);
3265 schedule_work(&nic->rst_timer_task);
3266 nic->mac_control.stats_info->sw_stat.
3271 nic->mac_control.stats_info->sw_stat.
3276 /* In case of a serious error, the device will be Reset. */
3277 val64 = readq(&bar0->serr_source);
3278 if (val64 & SERR_SOURCE_ANY) {
3279 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3280 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3281 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3282 (unsigned long long)val64);
3283 netif_stop_queue(dev);
3284 schedule_work(&nic->rst_timer_task);
3285 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3289 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3290 * Error occurs, the adapter will be recycled by disabling the
3291 * adapter enable bit and enabling it again after the device
3292 * becomes Quiescent.
3294 val64 = readq(&bar0->pcc_err_reg);
3295 writeq(val64, &bar0->pcc_err_reg);
3296 if (val64 & PCC_FB_ECC_DB_ERR) {
3297 u64 ac = readq(&bar0->adapter_control);
3298 ac &= ~(ADAPTER_CNTL_EN);
3299 writeq(ac, &bar0->adapter_control);
3300 ac = readq(&bar0->adapter_control);
3301 schedule_work(&nic->set_link_task);
3303 /* Check for data parity error */
3304 val64 = readq(&bar0->pic_int_status);
3305 if (val64 & PIC_INT_GPIO) {
3306 val64 = readq(&bar0->gpio_int_reg);
3307 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3308 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3309 schedule_work(&nic->rst_timer_task);
3310 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3314 /* Check for ring full counter */
3315 if (nic->device_type & XFRAME_II_DEVICE) {
3316 val64 = readq(&bar0->ring_bump_counter1);
3317 for (i=0; i<4; i++) {
3318 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3319 cnt >>= 64 - ((i+1)*16);
3320 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3324 val64 = readq(&bar0->ring_bump_counter2);
3325 for (i=0; i<4; i++) {
3326 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3327 cnt >>= 64 - ((i+1)*16);
3328 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3333 /* Other type of interrupts are not being handled now, TODO */
3337 * wait_for_cmd_complete - waits for a command to complete.
3338 * @sp : private member of the device structure, which is a pointer to the
3339 * s2io_nic structure.
3340 * Description: Function that waits for a command to Write into RMAC
3341 * ADDR DATA registers to be completed and returns either success or
3342 * error depending on whether the command was complete or not.
3344 * SUCCESS on success and FAILURE on failure.
3347 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3350 int ret = FAILURE, cnt = 0, delay = 1;
3353 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3357 val64 = readq(addr);
3358 if (bit_state == S2IO_BIT_RESET) {
3359 if (!(val64 & busy_bit)) {
3364 if (!(val64 & busy_bit)) {
3381 * check_pci_device_id - Checks if the device id is supported
3383 * Description: Function to check if the pci device id is supported by driver.
3384 * Return value: Actual device id if supported else PCI_ANY_ID
3386 static u16 check_pci_device_id(u16 id)
3389 case PCI_DEVICE_ID_HERC_WIN:
3390 case PCI_DEVICE_ID_HERC_UNI:
3391 return XFRAME_II_DEVICE;
3392 case PCI_DEVICE_ID_S2IO_UNI:
3393 case PCI_DEVICE_ID_S2IO_WIN:
3394 return XFRAME_I_DEVICE;
3401 * s2io_reset - Resets the card.
3402 * @sp : private member of the device structure.
3403 * Description: Function to Reset the card. This function then also
3404 * restores the previously saved PCI configuration space registers as
3405 * the card reset also resets the configuration space.
3410 static void s2io_reset(struct s2io_nic * sp)
3412 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3417 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3418 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3420 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3421 __FUNCTION__, sp->dev->name);
3423 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3424 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3426 if (sp->device_type == XFRAME_II_DEVICE) {
3428 ret = pci_set_power_state(sp->pdev, 3);
3430 ret = pci_set_power_state(sp->pdev, 0);
3432 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3440 val64 = SW_RESET_ALL;
3441 writeq(val64, &bar0->sw_reset);
3443 if (strstr(sp->product_name, "CX4")) {
3447 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3449 /* Restore the PCI state saved during initialization. */
3450 pci_restore_state(sp->pdev);
3451 pci_read_config_word(sp->pdev, 0x2, &val16);
3452 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3457 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3458 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3461 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3465 /* Set swapper to enable I/O register access */
3466 s2io_set_swapper(sp);
3468 /* Restore the MSIX table entries from local variables */
3469 restore_xmsi_data(sp);
3471 /* Clear certain PCI/PCI-X fields after reset */
3472 if (sp->device_type == XFRAME_II_DEVICE) {
3473 /* Clear "detected parity error" bit */
3474 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3476 /* Clearing PCIX Ecc status register */
3477 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3479 /* Clearing PCI_STATUS error reflected here */
3480 writeq(BIT(62), &bar0->txpic_int_reg);
3483 /* Reset device statistics maintained by OS */
3484 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3486 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3487 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3488 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3489 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3490 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3491 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3492 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3493 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3494 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3495 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3496 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3497 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3498 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3499 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3500 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3501 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3502 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3503 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3504 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3506 /* SXE-002: Configure link and activity LED to turn it off */
3507 subid = sp->pdev->subsystem_device;
3508 if (((subid & 0xFF) >= 0x07) &&
3509 (sp->device_type == XFRAME_I_DEVICE)) {
3510 val64 = readq(&bar0->gpio_control);
3511 val64 |= 0x0000800000000000ULL;
3512 writeq(val64, &bar0->gpio_control);
3513 val64 = 0x0411040400000000ULL;
3514 writeq(val64, (void __iomem *)bar0 + 0x2700);
3518 * Clear spurious ECC interrupts that would have occured on
3519 * XFRAME II cards after reset.
3521 if (sp->device_type == XFRAME_II_DEVICE) {
3522 val64 = readq(&bar0->pcc_err_reg);
3523 writeq(val64, &bar0->pcc_err_reg);
3526 /* restore the previously assigned mac address */
3527 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3529 sp->device_enabled_once = FALSE;
3533 * s2io_set_swapper - to set the swapper controle on the card
3534 * @sp : private member of the device structure,
3535 * pointer to the s2io_nic structure.
3536 * Description: Function to set the swapper control on the card
3537 * correctly depending on the 'endianness' of the system.
3539 * SUCCESS on success and FAILURE on failure.
3542 static int s2io_set_swapper(struct s2io_nic * sp)
3544 struct net_device *dev = sp->dev;
3545 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3546 u64 val64, valt, valr;
3549 * Set proper endian settings and verify the same by reading
3550 * the PIF Feed-back register.
3553 val64 = readq(&bar0->pif_rd_swapper_fb);
3554 if (val64 != 0x0123456789ABCDEFULL) {
3556 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3557 0x8100008181000081ULL, /* FE=1, SE=0 */
3558 0x4200004242000042ULL, /* FE=0, SE=1 */
3559 0}; /* FE=0, SE=0 */
3562 writeq(value[i], &bar0->swapper_ctrl);
3563 val64 = readq(&bar0->pif_rd_swapper_fb);
3564 if (val64 == 0x0123456789ABCDEFULL)
3569 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3571 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3572 (unsigned long long) val64);
3577 valr = readq(&bar0->swapper_ctrl);
3580 valt = 0x0123456789ABCDEFULL;
3581 writeq(valt, &bar0->xmsi_address);
3582 val64 = readq(&bar0->xmsi_address);
3586 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3587 0x0081810000818100ULL, /* FE=1, SE=0 */
3588 0x0042420000424200ULL, /* FE=0, SE=1 */
3589 0}; /* FE=0, SE=0 */
3592 writeq((value[i] | valr), &bar0->swapper_ctrl);
3593 writeq(valt, &bar0->xmsi_address);
3594 val64 = readq(&bar0->xmsi_address);
3600 unsigned long long x = val64;
3601 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3602 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3606 val64 = readq(&bar0->swapper_ctrl);
3607 val64 &= 0xFFFF000000000000ULL;
3611 * The device by default set to a big endian format, so a
3612 * big endian driver need not set anything.
3614 val64 |= (SWAPPER_CTRL_TXP_FE |
3615 SWAPPER_CTRL_TXP_SE |
3616 SWAPPER_CTRL_TXD_R_FE |
3617 SWAPPER_CTRL_TXD_W_FE |
3618 SWAPPER_CTRL_TXF_R_FE |
3619 SWAPPER_CTRL_RXD_R_FE |
3620 SWAPPER_CTRL_RXD_W_FE |
3621 SWAPPER_CTRL_RXF_W_FE |
3622 SWAPPER_CTRL_XMSI_FE |
3623 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3624 if (sp->intr_type == INTA)
3625 val64 |= SWAPPER_CTRL_XMSI_SE;
3626 writeq(val64, &bar0->swapper_ctrl);
3629 * Initially we enable all bits to make it accessible by the
3630 * driver, then we selectively enable only those bits that
3633 val64 |= (SWAPPER_CTRL_TXP_FE |
3634 SWAPPER_CTRL_TXP_SE |
3635 SWAPPER_CTRL_TXD_R_FE |
3636 SWAPPER_CTRL_TXD_R_SE |
3637 SWAPPER_CTRL_TXD_W_FE |
3638 SWAPPER_CTRL_TXD_W_SE |
3639 SWAPPER_CTRL_TXF_R_FE |
3640 SWAPPER_CTRL_RXD_R_FE |
3641 SWAPPER_CTRL_RXD_R_SE |
3642 SWAPPER_CTRL_RXD_W_FE |
3643 SWAPPER_CTRL_RXD_W_SE |
3644 SWAPPER_CTRL_RXF_W_FE |
3645 SWAPPER_CTRL_XMSI_FE |
3646 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3647 if (sp->intr_type == INTA)
3648 val64 |= SWAPPER_CTRL_XMSI_SE;
3649 writeq(val64, &bar0->swapper_ctrl);
3651 val64 = readq(&bar0->swapper_ctrl);
3654 * Verifying if endian settings are accurate by reading a
3655 * feedback register.
3657 val64 = readq(&bar0->pif_rd_swapper_fb);
3658 if (val64 != 0x0123456789ABCDEFULL) {
3659 /* Endian settings are incorrect, calls for another dekko. */
3660 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3662 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3663 (unsigned long long) val64);
3670 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3672 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3674 int ret = 0, cnt = 0;
3677 val64 = readq(&bar0->xmsi_access);
3678 if (!(val64 & BIT(15)))
3684 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3691 static void restore_xmsi_data(struct s2io_nic *nic)
3693 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3697 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3698 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3699 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3700 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3701 writeq(val64, &bar0->xmsi_access);
3702 if (wait_for_msix_trans(nic, i)) {
3703 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3709 static void store_xmsi_data(struct s2io_nic *nic)
3711 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712 u64 val64, addr, data;
3715 /* Store and display */
3716 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3717 val64 = (BIT(15) | vBIT(i, 26, 6));
3718 writeq(val64, &bar0->xmsi_access);
3719 if (wait_for_msix_trans(nic, i)) {
3720 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3723 addr = readq(&bar0->xmsi_address);
3724 data = readq(&bar0->xmsi_data);
3726 nic->msix_info[i].addr = addr;
3727 nic->msix_info[i].data = data;
3732 int s2io_enable_msi(struct s2io_nic *nic)
3734 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3735 u16 msi_ctrl, msg_val;
3736 struct config_param *config = &nic->config;
3737 struct net_device *dev = nic->dev;
3738 u64 val64, tx_mat, rx_mat;
3741 val64 = readq(&bar0->pic_control);
3743 writeq(val64, &bar0->pic_control);
3745 err = pci_enable_msi(nic->pdev);
3747 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3753 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3754 * for interrupt handling.
3756 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3758 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3759 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3761 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3763 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3765 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3766 tx_mat = readq(&bar0->tx_mat0_n[0]);
3767 for (i=0; i<config->tx_fifo_num; i++) {
3768 tx_mat |= TX_MAT_SET(i, 1);
3770 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3772 rx_mat = readq(&bar0->rx_mat);
3773 for (i=0; i<config->rx_ring_num; i++) {
3774 rx_mat |= RX_MAT_SET(i, 1);
3776 writeq(rx_mat, &bar0->rx_mat);
3778 dev->irq = nic->pdev->irq;
3782 static int s2io_enable_msi_x(struct s2io_nic *nic)
3784 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3786 u16 msi_control; /* Temp variable */
3787 int ret, i, j, msix_indx = 1;
3789 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3791 if (nic->entries == NULL) {
3792 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3794 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3797 nic->mac_control.stats_info->sw_stat.mem_allocated
3798 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3799 memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3802 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3804 if (nic->s2io_entries == NULL) {
3805 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3807 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3808 kfree(nic->entries);
3809 nic->mac_control.stats_info->sw_stat.mem_freed
3810 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3813 nic->mac_control.stats_info->sw_stat.mem_allocated
3814 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3815 memset(nic->s2io_entries, 0,
3816 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3818 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3819 nic->entries[i].entry = i;
3820 nic->s2io_entries[i].entry = i;
3821 nic->s2io_entries[i].arg = NULL;
3822 nic->s2io_entries[i].in_use = 0;
3825 tx_mat = readq(&bar0->tx_mat0_n[0]);
3826 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3827 tx_mat |= TX_MAT_SET(i, msix_indx);
3828 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3829 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3830 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3832 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3834 if (!nic->config.bimodal) {
3835 rx_mat = readq(&bar0->rx_mat);
3836 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3837 rx_mat |= RX_MAT_SET(j, msix_indx);
3838 nic->s2io_entries[msix_indx].arg
3839 = &nic->mac_control.rings[j];
3840 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3841 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3843 writeq(rx_mat, &bar0->rx_mat);
3845 tx_mat = readq(&bar0->tx_mat0_n[7]);
3846 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3847 tx_mat |= TX_MAT_SET(i, msix_indx);
3848 nic->s2io_entries[msix_indx].arg
3849 = &nic->mac_control.rings[j];
3850 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3851 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3853 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3856 nic->avail_msix_vectors = 0;
3857 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3858 /* We fail init if error or we get less vectors than min required */
3859 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3860 nic->avail_msix_vectors = ret;
3861 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3864 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3865 kfree(nic->entries);
3866 nic->mac_control.stats_info->sw_stat.mem_freed
3867 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3868 kfree(nic->s2io_entries);
3869 nic->mac_control.stats_info->sw_stat.mem_freed
3870 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3871 nic->entries = NULL;
3872 nic->s2io_entries = NULL;
3873 nic->avail_msix_vectors = 0;
3876 if (!nic->avail_msix_vectors)
3877 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3880 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3881 * in the herc NIC. (Temp change, needs to be removed later)
3883 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3884 msi_control |= 0x1; /* Enable MSI */
3885 pci_write_config_word(nic->pdev, 0x42, msi_control);
3890 /* ********************************************************* *
3891 * Functions defined below concern the OS part of the driver *
3892 * ********************************************************* */
3895 * s2io_open - open entry point of the driver
3896 * @dev : pointer to the device structure.
3898 * This function is the open entry point of the driver. It mainly calls a
3899 * function to allocate Rx buffers and inserts them into the buffer
3900 * descriptors and then enables the Rx part of the NIC.
3902 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3906 static int s2io_open(struct net_device *dev)
3908 struct s2io_nic *sp = dev->priv;
3912 * Make sure you have link off by default every time
3913 * Nic is initialized
3915 netif_carrier_off(dev);
3916 sp->last_link_state = 0;
3918 /* Initialize H/W and enable interrupts */
3919 err = s2io_card_up(sp);
3921 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3923 goto hw_init_failed;
3926 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3927 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3930 goto hw_init_failed;
3933 netif_start_queue(dev);
3937 if (sp->intr_type == MSI_X) {
3940 sp->mac_control.stats_info->sw_stat.mem_freed
3941 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3943 if (sp->s2io_entries) {
3944 kfree(sp->s2io_entries);
3945 sp->mac_control.stats_info->sw_stat.mem_freed
3946 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3953 * s2io_close -close entry point of the driver
3954 * @dev : device pointer.
3956 * This is the stop entry point of the driver. It needs to undo exactly
3957 * whatever was done by the open entry point,thus it's usually referred to
3958 * as the close function.Among other things this function mainly stops the
3959 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3961 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3965 static int s2io_close(struct net_device *dev)
3967 struct s2io_nic *sp = dev->priv;
3969 netif_stop_queue(dev);
3970 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3977 * s2io_xmit - Tx entry point of te driver
3978 * @skb : the socket buffer containing the Tx data.
3979 * @dev : device pointer.
3981 * This function is the Tx entry point of the driver. S2IO NIC supports
3982 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3983 * NOTE: when device cant queue the pkt,just the trans_start variable will
3986 * 0 on success & 1 on failure.
3989 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3991 struct s2io_nic *sp = dev->priv;
3992 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3995 struct TxFIFO_element __iomem *tx_fifo;
3996 unsigned long flags;
3998 int vlan_priority = 0;
3999 struct mac_info *mac_control;
4000 struct config_param *config;
4003 mac_control = &sp->mac_control;
4004 config = &sp->config;
4006 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4008 if (unlikely(skb->len <= 0)) {
4009 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4010 dev_kfree_skb_any(skb);
4014 spin_lock_irqsave(&sp->tx_lock, flags);
4015 if (atomic_read(&sp->card_state) == CARD_DOWN) {
4016 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4018 spin_unlock_irqrestore(&sp->tx_lock, flags);
4024 /* Get Fifo number to Transmit based on vlan priority */
4025 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4026 vlan_tag = vlan_tx_tag_get(skb);
4027 vlan_priority = vlan_tag >> 13;
4028 queue = config->fifo_mapping[vlan_priority];
4031 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4032 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4033 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4036 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4037 /* Avoid "put" pointer going beyond "get" pointer */
4038 if (txdp->Host_Control ||
4039 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4040 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4041 netif_stop_queue(dev);
4043 spin_unlock_irqrestore(&sp->tx_lock, flags);
4047 offload_type = s2io_offload_type(skb);
4048 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4049 txdp->Control_1 |= TXD_TCP_LSO_EN;
4050 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4052 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4054 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4057 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4058 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4059 txdp->Control_2 |= config->tx_intr_type;
4061 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4062 txdp->Control_2 |= TXD_VLAN_ENABLE;
4063 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4066 frg_len = skb->len - skb->data_len;
4067 if (offload_type == SKB_GSO_UDP) {
4070 ufo_size = s2io_udp_mss(skb);
4072 txdp->Control_1 |= TXD_UFO_EN;
4073 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4074 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4076 sp->ufo_in_band_v[put_off] =
4077 (u64)skb_shinfo(skb)->ip6_frag_id;
4079 sp->ufo_in_band_v[put_off] =
4080 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4082 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4083 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4085 sizeof(u64), PCI_DMA_TODEVICE);
4089 txdp->Buffer_Pointer = pci_map_single
4090 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4091 txdp->Host_Control = (unsigned long) skb;
4092 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4093 if (offload_type == SKB_GSO_UDP)
4094 txdp->Control_1 |= TXD_UFO_EN;
4096 frg_cnt = skb_shinfo(skb)->nr_frags;
4097 /* For fragmented SKB. */
4098 for (i = 0; i < frg_cnt; i++) {
4099 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4100 /* A '0' length fragment will be ignored */
4104 txdp->Buffer_Pointer = (u64) pci_map_page
4105 (sp->pdev, frag->page, frag->page_offset,
4106 frag->size, PCI_DMA_TODEVICE);
4107 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4108 if (offload_type == SKB_GSO_UDP)
4109 txdp->Control_1 |= TXD_UFO_EN;
4111 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4113 if (offload_type == SKB_GSO_UDP)
4114 frg_cnt++; /* as Txd0 was used for inband header */
4116 tx_fifo = mac_control->tx_FIFO_start[queue];
4117 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4118 writeq(val64, &tx_fifo->TxDL_Pointer);
4120 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4123 val64 |= TX_FIFO_SPECIAL_FUNC;
4125 writeq(val64, &tx_fifo->List_Control);
4130 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4132 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4134 /* Avoid "put" pointer going beyond "get" pointer */
4135 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4136 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4138 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4140 netif_stop_queue(dev);
4142 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4143 dev->trans_start = jiffies;
4144 spin_unlock_irqrestore(&sp->tx_lock, flags);
4150 s2io_alarm_handle(unsigned long data)
4152 struct s2io_nic *sp = (struct s2io_nic *)data;
4154 alarm_intr_handler(sp);
4155 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4158 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4160 int rxb_size, level;
4163 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4164 level = rx_buffer_level(sp, rxb_size, rng_n);
4166 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4168 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4169 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4170 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4171 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4173 clear_bit(0, (&sp->tasklet_status));
4176 clear_bit(0, (&sp->tasklet_status));
4177 } else if (level == LOW)
4178 tasklet_schedule(&sp->task);
4180 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4181 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4182 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4187 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4189 struct net_device *dev = (struct net_device *) dev_id;
4190 struct s2io_nic *sp = dev->priv;
4192 struct mac_info *mac_control;
4193 struct config_param *config;
4195 atomic_inc(&sp->isr_cnt);
4196 mac_control = &sp->mac_control;
4197 config = &sp->config;
4198 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4200 /* If Intr is because of Rx Traffic */
4201 for (i = 0; i < config->rx_ring_num; i++)
4202 rx_intr_handler(&mac_control->rings[i]);
4204 /* If Intr is because of Tx Traffic */
4205 for (i = 0; i < config->tx_fifo_num; i++)
4206 tx_intr_handler(&mac_control->fifos[i]);
4209 * If the Rx buffer count is below the panic threshold then
4210 * reallocate the buffers from the interrupt handler itself,
4211 * else schedule a tasklet to reallocate the buffers.
4213 for (i = 0; i < config->rx_ring_num; i++)
4214 s2io_chk_rx_buffers(sp, i);
4216 atomic_dec(&sp->isr_cnt);
4220 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4222 struct ring_info *ring = (struct ring_info *)dev_id;
4223 struct s2io_nic *sp = ring->nic;
4225 atomic_inc(&sp->isr_cnt);
4227 rx_intr_handler(ring);
4228 s2io_chk_rx_buffers(sp, ring->ring_no);
4230 atomic_dec(&sp->isr_cnt);
4234 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4236 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4237 struct s2io_nic *sp = fifo->nic;
4239 atomic_inc(&sp->isr_cnt);
4240 tx_intr_handler(fifo);
4241 atomic_dec(&sp->isr_cnt);
4244 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4249 val64 = readq(&bar0->pic_int_status);
4250 if (val64 & PIC_INT_GPIO) {
4251 val64 = readq(&bar0->gpio_int_reg);
4252 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4253 (val64 & GPIO_INT_REG_LINK_UP)) {
4255 * This is unstable state so clear both up/down
4256 * interrupt and adapter to re-evaluate the link state.
4258 val64 |= GPIO_INT_REG_LINK_DOWN;
4259 val64 |= GPIO_INT_REG_LINK_UP;
4260 writeq(val64, &bar0->gpio_int_reg);
4261 val64 = readq(&bar0->gpio_int_mask);
4262 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4263 GPIO_INT_MASK_LINK_DOWN);
4264 writeq(val64, &bar0->gpio_int_mask);
4266 else if (val64 & GPIO_INT_REG_LINK_UP) {
4267 val64 = readq(&bar0->adapter_status);
4268 /* Enable Adapter */
4269 val64 = readq(&bar0->adapter_control);
4270 val64 |= ADAPTER_CNTL_EN;
4271 writeq(val64, &bar0->adapter_control);
4272 val64 |= ADAPTER_LED_ON;
4273 writeq(val64, &bar0->adapter_control);
4274 if (!sp->device_enabled_once)
4275 sp->device_enabled_once = 1;
4277 s2io_link(sp, LINK_UP);
4279 * unmask link down interrupt and mask link-up
4282 val64 = readq(&bar0->gpio_int_mask);
4283 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4284 val64 |= GPIO_INT_MASK_LINK_UP;
4285 writeq(val64, &bar0->gpio_int_mask);
4287 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4288 val64 = readq(&bar0->adapter_status);
4289 s2io_link(sp, LINK_DOWN);
4290 /* Link is down so unmaks link up interrupt */
4291 val64 = readq(&bar0->gpio_int_mask);
4292 val64 &= ~GPIO_INT_MASK_LINK_UP;
4293 val64 |= GPIO_INT_MASK_LINK_DOWN;
4294 writeq(val64, &bar0->gpio_int_mask);
4297 val64 = readq(&bar0->adapter_control);
4298 val64 = val64 &(~ADAPTER_LED_ON);
4299 writeq(val64, &bar0->adapter_control);
4302 val64 = readq(&bar0->gpio_int_mask);
4306 * s2io_isr - ISR handler of the device .
4307 * @irq: the irq of the device.
4308 * @dev_id: a void pointer to the dev structure of the NIC.
4309 * Description: This function is the ISR handler of the device. It
4310 * identifies the reason for the interrupt and calls the relevant
4311 * service routines. As a contongency measure, this ISR allocates the
4312 * recv buffers, if their numbers are below the panic value which is
4313 * presently set to 25% of the original number of rcv buffers allocated.
4315 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4316 * IRQ_NONE: will be returned if interrupt is not from our device
4318 static irqreturn_t s2io_isr(int irq, void *dev_id)
4320 struct net_device *dev = (struct net_device *) dev_id;
4321 struct s2io_nic *sp = dev->priv;
4322 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4325 struct mac_info *mac_control;
4326 struct config_param *config;
4328 /* Pretend we handled any irq's from a disconnected card */
4329 if (pci_channel_offline(sp->pdev))
4332 atomic_inc(&sp->isr_cnt);
4333 mac_control = &sp->mac_control;
4334 config = &sp->config;
4337 * Identify the cause for interrupt and call the appropriate
4338 * interrupt handler. Causes for the interrupt could be;
4342 * 4. Error in any functional blocks of the NIC.
4344 reason = readq(&bar0->general_int_status);
4347 /* The interrupt was not raised by us. */
4348 atomic_dec(&sp->isr_cnt);
4351 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4352 /* Disable device and get out */
4353 atomic_dec(&sp->isr_cnt);
4358 if (reason & GEN_INTR_RXTRAFFIC) {
4359 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4360 __netif_rx_schedule(dev);
4361 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4364 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4368 * Rx handler is called by default, without checking for the
4369 * cause of interrupt.
4370 * rx_traffic_int reg is an R1 register, writing all 1's
4371 * will ensure that the actual interrupt causing bit get's
4372 * cleared and hence a read can be avoided.
4374 if (reason & GEN_INTR_RXTRAFFIC)
4375 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4377 for (i = 0; i < config->rx_ring_num; i++) {
4378 rx_intr_handler(&mac_control->rings[i]);
4383 * tx_traffic_int reg is an R1 register, writing all 1's
4384 * will ensure that the actual interrupt causing bit get's
4385 * cleared and hence a read can be avoided.
4387 if (reason & GEN_INTR_TXTRAFFIC)
4388 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4390 for (i = 0; i < config->tx_fifo_num; i++)
4391 tx_intr_handler(&mac_control->fifos[i]);
4393 if (reason & GEN_INTR_TXPIC)
4394 s2io_txpic_intr_handle(sp);
4396 * If the Rx buffer count is below the panic threshold then
4397 * reallocate the buffers from the interrupt handler itself,
4398 * else schedule a tasklet to reallocate the buffers.
4401 for (i = 0; i < config->rx_ring_num; i++)
4402 s2io_chk_rx_buffers(sp, i);
4405 writeq(0, &bar0->general_int_mask);
4406 readl(&bar0->general_int_status);
4408 atomic_dec(&sp->isr_cnt);
4415 static void s2io_updt_stats(struct s2io_nic *sp)
4417 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4421 if (atomic_read(&sp->card_state) == CARD_UP) {
4422 /* Apprx 30us on a 133 MHz bus */
4423 val64 = SET_UPDT_CLICKS(10) |
4424 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4425 writeq(val64, &bar0->stat_cfg);
4428 val64 = readq(&bar0->stat_cfg);
4429 if (!(val64 & BIT(0)))
4433 break; /* Updt failed */
4439 * s2io_get_stats - Updates the device statistics structure.
4440 * @dev : pointer to the device structure.
4442 * This function updates the device statistics structure in the s2io_nic
4443 * structure and returns a pointer to the same.
4445 * pointer to the updated net_device_stats structure.
4448 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4450 struct s2io_nic *sp = dev->priv;
4451 struct mac_info *mac_control;
4452 struct config_param *config;
4455 mac_control = &sp->mac_control;
4456 config = &sp->config;
4458 /* Configure Stats for immediate updt */
4459 s2io_updt_stats(sp);
4461 sp->stats.tx_packets =
4462 le32_to_cpu(mac_control->stats_info->tmac_frms);
4463 sp->stats.tx_errors =
4464 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4465 sp->stats.rx_errors =
4466 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4467 sp->stats.multicast =
4468 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4469 sp->stats.rx_length_errors =
4470 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4472 return (&sp->stats);
4476 * s2io_set_multicast - entry point for multicast address enable/disable.
4477 * @dev : pointer to the device structure
4479 * This function is a driver entry point which gets called by the kernel
4480 * whenever multicast addresses must be enabled/disabled. This also gets
4481 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4482 * determine, if multicast address must be enabled or if promiscuous mode
4483 * is to be disabled etc.
4488 static void s2io_set_multicast(struct net_device *dev)
4491 struct dev_mc_list *mclist;
4492 struct s2io_nic *sp = dev->priv;
4493 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4494 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4496 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4499 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4500 /* Enable all Multicast addresses */
4501 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4502 &bar0->rmac_addr_data0_mem);
4503 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4504 &bar0->rmac_addr_data1_mem);
4505 val64 = RMAC_ADDR_CMD_MEM_WE |
4506 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4507 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4508 writeq(val64, &bar0->rmac_addr_cmd_mem);
4509 /* Wait till command completes */
4510 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4511 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4515 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4516 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4517 /* Disable all Multicast addresses */
4518 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4519 &bar0->rmac_addr_data0_mem);
4520 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4521 &bar0->rmac_addr_data1_mem);
4522 val64 = RMAC_ADDR_CMD_MEM_WE |
4523 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4524 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4525 writeq(val64, &bar0->rmac_addr_cmd_mem);
4526 /* Wait till command completes */
4527 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4528 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4532 sp->all_multi_pos = 0;
4535 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4536 /* Put the NIC into promiscuous mode */
4537 add = &bar0->mac_cfg;
4538 val64 = readq(&bar0->mac_cfg);
4539 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4541 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4542 writel((u32) val64, add);
4543 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4544 writel((u32) (val64 >> 32), (add + 4));
4546 if (vlan_tag_strip != 1) {
4547 val64 = readq(&bar0->rx_pa_cfg);
4548 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4549 writeq(val64, &bar0->rx_pa_cfg);
4550 vlan_strip_flag = 0;
4553 val64 = readq(&bar0->mac_cfg);
4554 sp->promisc_flg = 1;
4555 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4557 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4558 /* Remove the NIC from promiscuous mode */
4559 add = &bar0->mac_cfg;
4560 val64 = readq(&bar0->mac_cfg);
4561 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4563 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4564 writel((u32) val64, add);
4565 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4566 writel((u32) (val64 >> 32), (add + 4));
4568 if (vlan_tag_strip != 0) {
4569 val64 = readq(&bar0->rx_pa_cfg);
4570 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4571 writeq(val64, &bar0->rx_pa_cfg);
4572 vlan_strip_flag = 1;
4575 val64 = readq(&bar0->mac_cfg);
4576 sp->promisc_flg = 0;
4577 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4581 /* Update individual M_CAST address list */
4582 if ((!sp->m_cast_flg) && dev->mc_count) {
4584 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4585 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4587 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4588 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4592 prev_cnt = sp->mc_addr_count;
4593 sp->mc_addr_count = dev->mc_count;
4595 /* Clear out the previous list of Mc in the H/W. */
4596 for (i = 0; i < prev_cnt; i++) {
4597 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4598 &bar0->rmac_addr_data0_mem);
4599 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4600 &bar0->rmac_addr_data1_mem);
4601 val64 = RMAC_ADDR_CMD_MEM_WE |
4602 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4603 RMAC_ADDR_CMD_MEM_OFFSET
4604 (MAC_MC_ADDR_START_OFFSET + i);
4605 writeq(val64, &bar0->rmac_addr_cmd_mem);
4607 /* Wait for command completes */
4608 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4609 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4611 DBG_PRINT(ERR_DBG, "%s: Adding ",
4613 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4618 /* Create the new Rx filter list and update the same in H/W. */
4619 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4620 i++, mclist = mclist->next) {
4621 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4624 for (j = 0; j < ETH_ALEN; j++) {
4625 mac_addr |= mclist->dmi_addr[j];
4629 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4630 &bar0->rmac_addr_data0_mem);
4631 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4632 &bar0->rmac_addr_data1_mem);
4633 val64 = RMAC_ADDR_CMD_MEM_WE |
4634 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4635 RMAC_ADDR_CMD_MEM_OFFSET
4636 (i + MAC_MC_ADDR_START_OFFSET);
4637 writeq(val64, &bar0->rmac_addr_cmd_mem);
4639 /* Wait for command completes */
4640 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4641 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4643 DBG_PRINT(ERR_DBG, "%s: Adding ",
4645 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4653 * s2io_set_mac_addr - Programs the Xframe mac address
4654 * @dev : pointer to the device structure.
4655 * @addr: a uchar pointer to the new mac address which is to be set.
4656 * Description : This procedure will program the Xframe to receive
4657 * frames with new Mac Address
4658 * Return value: SUCCESS on success and an appropriate (-)ve integer
4659 * as defined in errno.h file on failure.
4662 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4664 struct s2io_nic *sp = dev->priv;
4665 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4666 register u64 val64, mac_addr = 0;
4668 u64 old_mac_addr = 0;
4671 * Set the new MAC address as the new unicast filter and reflect this
4672 * change on the device address registered with the OS. It will be
4675 for (i = 0; i < ETH_ALEN; i++) {
4677 mac_addr |= addr[i];
4679 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4685 /* Update the internal structure with this new mac address */
4686 if(mac_addr != old_mac_addr) {
4687 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4688 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4689 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4690 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4691 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4692 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4693 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4696 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4697 &bar0->rmac_addr_data0_mem);
4700 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4701 RMAC_ADDR_CMD_MEM_OFFSET(0);
4702 writeq(val64, &bar0->rmac_addr_cmd_mem);
4703 /* Wait till command completes */
4704 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4705 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4706 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4714 * s2io_ethtool_sset - Sets different link parameters.
4715 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4716 * @info: pointer to the structure with parameters given by ethtool to set
4719 * The function sets different link parameters provided by the user onto
4725 static int s2io_ethtool_sset(struct net_device *dev,
4726 struct ethtool_cmd *info)
4728 struct s2io_nic *sp = dev->priv;
4729 if ((info->autoneg == AUTONEG_ENABLE) ||
4730 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4733 s2io_close(sp->dev);
4741 * s2io_ethtol_gset - Return link specific information.
4742 * @sp : private member of the device structure, pointer to the
4743 * s2io_nic structure.
4744 * @info : pointer to the structure with parameters given by ethtool
4745 * to return link information.
4747 * Returns link specific information like speed, duplex etc.. to ethtool.
4749 * return 0 on success.
4752 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4754 struct s2io_nic *sp = dev->priv;
4755 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4756 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4757 info->port = PORT_FIBRE;
4758 /* info->transceiver?? TODO */
4760 if (netif_carrier_ok(sp->dev)) {
4761 info->speed = 10000;
4762 info->duplex = DUPLEX_FULL;
4768 info->autoneg = AUTONEG_DISABLE;
4773 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4774 * @sp : private member of the device structure, which is a pointer to the
4775 * s2io_nic structure.
4776 * @info : pointer to the structure with parameters given by ethtool to
4777 * return driver information.
4779 * Returns driver specefic information like name, version etc.. to ethtool.
4784 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4785 struct ethtool_drvinfo *info)
4787 struct s2io_nic *sp = dev->priv;
4789 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4790 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4791 strncpy(info->fw_version, "", sizeof(info->fw_version));
4792 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4793 info->regdump_len = XENA_REG_SPACE;
4794 info->eedump_len = XENA_EEPROM_SPACE;
4795 info->testinfo_len = S2IO_TEST_LEN;
4797 if (sp->device_type == XFRAME_I_DEVICE)
4798 info->n_stats = XFRAME_I_STAT_LEN;
4800 info->n_stats = XFRAME_II_STAT_LEN;
4804 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4805 * @sp: private member of the device structure, which is a pointer to the
4806 * s2io_nic structure.
4807 * @regs : pointer to the structure with parameters given by ethtool for
4808 * dumping the registers.
4809 * @reg_space: The input argumnet into which all the registers are dumped.
4811 * Dumps the entire register space of xFrame NIC into the user given
4817 static void s2io_ethtool_gregs(struct net_device *dev,
4818 struct ethtool_regs *regs, void *space)
4822 u8 *reg_space = (u8 *) space;
4823 struct s2io_nic *sp = dev->priv;
4825 regs->len = XENA_REG_SPACE;
4826 regs->version = sp->pdev->subsystem_device;
4828 for (i = 0; i < regs->len; i += 8) {
4829 reg = readq(sp->bar0 + i);
4830 memcpy((reg_space + i), ®, 8);
4835 * s2io_phy_id - timer function that alternates adapter LED.
4836 * @data : address of the private member of the device structure, which
4837 * is a pointer to the s2io_nic structure, provided as an u32.
4838 * Description: This is actually the timer function that alternates the
4839 * adapter LED bit of the adapter control bit to set/reset every time on
4840 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4841 * once every second.
4843 static void s2io_phy_id(unsigned long data)
4845 struct s2io_nic *sp = (struct s2io_nic *) data;
4846 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4850 subid = sp->pdev->subsystem_device;
4851 if ((sp->device_type == XFRAME_II_DEVICE) ||
4852 ((subid & 0xFF) >= 0x07)) {
4853 val64 = readq(&bar0->gpio_control);
4854 val64 ^= GPIO_CTRL_GPIO_0;
4855 writeq(val64, &bar0->gpio_control);
4857 val64 = readq(&bar0->adapter_control);
4858 val64 ^= ADAPTER_LED_ON;
4859 writeq(val64, &bar0->adapter_control);
4862 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4866 * s2io_ethtool_idnic - To physically identify the nic on the system.
4867 * @sp : private member of the device structure, which is a pointer to the
4868 * s2io_nic structure.
4869 * @id : pointer to the structure with identification parameters given by
4871 * Description: Used to physically identify the NIC on the system.
4872 * The Link LED will blink for a time specified by the user for
4874 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4875 * identification is possible only if it's link is up.
4877 * int , returns 0 on success
4880 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4882 u64 val64 = 0, last_gpio_ctrl_val;
4883 struct s2io_nic *sp = dev->priv;
4884 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4887 subid = sp->pdev->subsystem_device;
4888 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4889 if ((sp->device_type == XFRAME_I_DEVICE) &&
4890 ((subid & 0xFF) < 0x07)) {
4891 val64 = readq(&bar0->adapter_control);
4892 if (!(val64 & ADAPTER_CNTL_EN)) {
4894 "Adapter Link down, cannot blink LED\n");
4898 if (sp->id_timer.function == NULL) {
4899 init_timer(&sp->id_timer);
4900 sp->id_timer.function = s2io_phy_id;
4901 sp->id_timer.data = (unsigned long) sp;
4903 mod_timer(&sp->id_timer, jiffies);
4905 msleep_interruptible(data * HZ);
4907 msleep_interruptible(MAX_FLICKER_TIME);
4908 del_timer_sync(&sp->id_timer);
4910 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4911 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4912 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4918 static void s2io_ethtool_gringparam(struct net_device *dev,
4919 struct ethtool_ringparam *ering)
4921 struct s2io_nic *sp = dev->priv;
4922 int i,tx_desc_count=0,rx_desc_count=0;
4924 if (sp->rxd_mode == RXD_MODE_1)
4925 ering->rx_max_pending = MAX_RX_DESC_1;
4926 else if (sp->rxd_mode == RXD_MODE_3B)
4927 ering->rx_max_pending = MAX_RX_DESC_2;
4928 else if (sp->rxd_mode == RXD_MODE_3A)
4929 ering->rx_max_pending = MAX_RX_DESC_3;
4931 ering->tx_max_pending = MAX_TX_DESC;
4932 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
4933 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4935 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4936 ering->tx_pending = tx_desc_count;
4938 for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
4939 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4941 ering->rx_pending = rx_desc_count;
4943 ering->rx_mini_max_pending = 0;
4944 ering->rx_mini_pending = 0;
4945 if(sp->rxd_mode == RXD_MODE_1)
4946 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4947 else if (sp->rxd_mode == RXD_MODE_3B)
4948 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4949 ering->rx_jumbo_pending = rx_desc_count;
4953 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4954 * @sp : private member of the device structure, which is a pointer to the
4955 * s2io_nic structure.
4956 * @ep : pointer to the structure with pause parameters given by ethtool.
4958 * Returns the Pause frame generation and reception capability of the NIC.
4962 static void s2io_ethtool_getpause_data(struct net_device *dev,
4963 struct ethtool_pauseparam *ep)
4966 struct s2io_nic *sp = dev->priv;
4967 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4969 val64 = readq(&bar0->rmac_pause_cfg);
4970 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4971 ep->tx_pause = TRUE;
4972 if (val64 & RMAC_PAUSE_RX_ENABLE)
4973 ep->rx_pause = TRUE;
4974 ep->autoneg = FALSE;
4978 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4979 * @sp : private member of the device structure, which is a pointer to the
4980 * s2io_nic structure.
4981 * @ep : pointer to the structure with pause parameters given by ethtool.
4983 * It can be used to set or reset Pause frame generation or reception
4984 * support of the NIC.
4986 * int, returns 0 on Success
4989 static int s2io_ethtool_setpause_data(struct net_device *dev,
4990 struct ethtool_pauseparam *ep)
4993 struct s2io_nic *sp = dev->priv;
4994 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4996 val64 = readq(&bar0->rmac_pause_cfg);
4998 val64 |= RMAC_PAUSE_GEN_ENABLE;
5000 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5002 val64 |= RMAC_PAUSE_RX_ENABLE;
5004 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5005 writeq(val64, &bar0->rmac_pause_cfg);
5010 * read_eeprom - reads 4 bytes of data from user given offset.
5011 * @sp : private member of the device structure, which is a pointer to the
5012 * s2io_nic structure.
5013 * @off : offset at which the data must be written
5014 * @data : Its an output parameter where the data read at the given
5017 * Will read 4 bytes of data from the user given offset and return the
5019 * NOTE: Will allow to read only part of the EEPROM visible through the
5022 * -1 on failure and 0 on success.
5025 #define S2IO_DEV_ID 5
5026 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5031 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5033 if (sp->device_type == XFRAME_I_DEVICE) {
5034 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5035 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5036 I2C_CONTROL_CNTL_START;
5037 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5039 while (exit_cnt < 5) {
5040 val64 = readq(&bar0->i2c_control);
5041 if (I2C_CONTROL_CNTL_END(val64)) {
5042 *data = I2C_CONTROL_GET_DATA(val64);
5051 if (sp->device_type == XFRAME_II_DEVICE) {
5052 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5053 SPI_CONTROL_BYTECNT(0x3) |
5054 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5055 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5056 val64 |= SPI_CONTROL_REQ;
5057 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5058 while (exit_cnt < 5) {
5059 val64 = readq(&bar0->spi_control);
5060 if (val64 & SPI_CONTROL_NACK) {
5063 } else if (val64 & SPI_CONTROL_DONE) {
5064 *data = readq(&bar0->spi_data);
5077 * write_eeprom - actually writes the relevant part of the data value.
5078 * @sp : private member of the device structure, which is a pointer to the
5079 * s2io_nic structure.
5080 * @off : offset at which the data must be written
5081 * @data : The data that is to be written
5082 * @cnt : Number of bytes of the data that are actually to be written into
5083 * the Eeprom. (max of 3)
5085 * Actually writes the relevant part of the data value into the Eeprom
5086 * through the I2C bus.
5088 * 0 on success, -1 on failure.
5091 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5093 int exit_cnt = 0, ret = -1;
5095 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5097 if (sp->device_type == XFRAME_I_DEVICE) {
5098 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5099 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5100 I2C_CONTROL_CNTL_START;
5101 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5103 while (exit_cnt < 5) {
5104 val64 = readq(&bar0->i2c_control);
5105 if (I2C_CONTROL_CNTL_END(val64)) {
5106 if (!(val64 & I2C_CONTROL_NACK))
5115 if (sp->device_type == XFRAME_II_DEVICE) {
5116 int write_cnt = (cnt == 8) ? 0 : cnt;
5117 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5119 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5120 SPI_CONTROL_BYTECNT(write_cnt) |
5121 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5122 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5123 val64 |= SPI_CONTROL_REQ;
5124 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5125 while (exit_cnt < 5) {
5126 val64 = readq(&bar0->spi_control);
5127 if (val64 & SPI_CONTROL_NACK) {
5130 } else if (val64 & SPI_CONTROL_DONE) {
5140 static void s2io_vpd_read(struct s2io_nic *nic)
5144 int i=0, cnt, fail = 0;
5145 int vpd_addr = 0x80;
5147 if (nic->device_type == XFRAME_II_DEVICE) {
5148 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5152 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5155 strcpy(nic->serial_num, "NOT AVAILABLE");
5157 vpd_data = kmalloc(256, GFP_KERNEL);
5159 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5162 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5164 for (i = 0; i < 256; i +=4 ) {
5165 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5166 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5167 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5168 for (cnt = 0; cnt <5; cnt++) {
5170 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5175 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5179 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5180 (u32 *)&vpd_data[i]);
5184 /* read serial number of adapter */
5185 for (cnt = 0; cnt < 256; cnt++) {
5186 if ((vpd_data[cnt] == 'S') &&
5187 (vpd_data[cnt+1] == 'N') &&
5188 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5189 memset(nic->serial_num, 0, VPD_STRING_LEN);
5190 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5197 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5198 memset(nic->product_name, 0, vpd_data[1]);
5199 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5202 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5206 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5207 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5208 * @eeprom : pointer to the user level structure provided by ethtool,
5209 * containing all relevant information.
5210 * @data_buf : user defined value to be written into Eeprom.
5211 * Description: Reads the values stored in the Eeprom at given offset
5212 * for a given length. Stores these values int the input argument data
5213 * buffer 'data_buf' and returns these to the caller (ethtool.)
5218 static int s2io_ethtool_geeprom(struct net_device *dev,
5219 struct ethtool_eeprom *eeprom, u8 * data_buf)
5223 struct s2io_nic *sp = dev->priv;
5225 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5227 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5228 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5230 for (i = 0; i < eeprom->len; i += 4) {
5231 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5232 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5236 memcpy((data_buf + i), &valid, 4);
5242 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5243 * @sp : private member of the device structure, which is a pointer to the
5244 * s2io_nic structure.
5245 * @eeprom : pointer to the user level structure provided by ethtool,
5246 * containing all relevant information.
5247 * @data_buf ; user defined value to be written into Eeprom.
5249 * Tries to write the user provided value in the Eeprom, at the offset
5250 * given by the user.
5252 * 0 on success, -EFAULT on failure.
5255 static int s2io_ethtool_seeprom(struct net_device *dev,
5256 struct ethtool_eeprom *eeprom,
5259 int len = eeprom->len, cnt = 0;
5260 u64 valid = 0, data;
5261 struct s2io_nic *sp = dev->priv;
5263 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5265 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5266 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5272 data = (u32) data_buf[cnt] & 0x000000FF;
5274 valid = (u32) (data << 24);
5278 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5280 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5282 "write into the specified offset\n");
5293 * s2io_register_test - reads and writes into all clock domains.
5294 * @sp : private member of the device structure, which is a pointer to the
5295 * s2io_nic structure.
5296 * @data : variable that returns the result of each of the test conducted b
5299 * Read and write into all clock domains. The NIC has 3 clock domains,
5300 * see that registers in all the three regions are accessible.
5305 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5307 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5308 u64 val64 = 0, exp_val;
5311 val64 = readq(&bar0->pif_rd_swapper_fb);
5312 if (val64 != 0x123456789abcdefULL) {
5314 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5317 val64 = readq(&bar0->rmac_pause_cfg);
5318 if (val64 != 0xc000ffff00000000ULL) {
5320 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5323 val64 = readq(&bar0->rx_queue_cfg);
5324 if (sp->device_type == XFRAME_II_DEVICE)
5325 exp_val = 0x0404040404040404ULL;
5327 exp_val = 0x0808080808080808ULL;
5328 if (val64 != exp_val) {
5330 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5333 val64 = readq(&bar0->xgxs_efifo_cfg);
5334 if (val64 != 0x000000001923141EULL) {
5336 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5339 val64 = 0x5A5A5A5A5A5A5A5AULL;
5340 writeq(val64, &bar0->xmsi_data);
5341 val64 = readq(&bar0->xmsi_data);
5342 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5344 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5347 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5348 writeq(val64, &bar0->xmsi_data);
5349 val64 = readq(&bar0->xmsi_data);
5350 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5352 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5360 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5361 * @sp : private member of the device structure, which is a pointer to the
5362 * s2io_nic structure.
5363 * @data:variable that returns the result of each of the test conducted by
5366 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5372 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5375 u64 ret_data, org_4F0, org_7F0;
5376 u8 saved_4F0 = 0, saved_7F0 = 0;
5377 struct net_device *dev = sp->dev;
5379 /* Test Write Error at offset 0 */
5380 /* Note that SPI interface allows write access to all areas
5381 * of EEPROM. Hence doing all negative testing only for Xframe I.
5383 if (sp->device_type == XFRAME_I_DEVICE)
5384 if (!write_eeprom(sp, 0, 0, 3))
5387 /* Save current values at offsets 0x4F0 and 0x7F0 */
5388 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5390 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5393 /* Test Write at offset 4f0 */
5394 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5396 if (read_eeprom(sp, 0x4F0, &ret_data))
5399 if (ret_data != 0x012345) {
5400 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5401 "Data written %llx Data read %llx\n",
5402 dev->name, (unsigned long long)0x12345,
5403 (unsigned long long)ret_data);
5407 /* Reset the EEPROM data go FFFF */
5408 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5410 /* Test Write Request Error at offset 0x7c */
5411 if (sp->device_type == XFRAME_I_DEVICE)
5412 if (!write_eeprom(sp, 0x07C, 0, 3))
5415 /* Test Write Request at offset 0x7f0 */
5416 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5418 if (read_eeprom(sp, 0x7F0, &ret_data))
5421 if (ret_data != 0x012345) {
5422 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5423 "Data written %llx Data read %llx\n",
5424 dev->name, (unsigned long long)0x12345,
5425 (unsigned long long)ret_data);
5429 /* Reset the EEPROM data go FFFF */
5430 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5432 if (sp->device_type == XFRAME_I_DEVICE) {
5433 /* Test Write Error at offset 0x80 */
5434 if (!write_eeprom(sp, 0x080, 0, 3))
5437 /* Test Write Error at offset 0xfc */
5438 if (!write_eeprom(sp, 0x0FC, 0, 3))
5441 /* Test Write Error at offset 0x100 */
5442 if (!write_eeprom(sp, 0x100, 0, 3))
5445 /* Test Write Error at offset 4ec */
5446 if (!write_eeprom(sp, 0x4EC, 0, 3))
5450 /* Restore values at offsets 0x4F0 and 0x7F0 */
5452 write_eeprom(sp, 0x4F0, org_4F0, 3);
5454 write_eeprom(sp, 0x7F0, org_7F0, 3);
5461 * s2io_bist_test - invokes the MemBist test of the card .
5462 * @sp : private member of the device structure, which is a pointer to the
5463 * s2io_nic structure.
5464 * @data:variable that returns the result of each of the test conducted by
5467 * This invokes the MemBist test of the card. We give around
5468 * 2 secs time for the Test to complete. If it's still not complete
5469 * within this peiod, we consider that the test failed.
5471 * 0 on success and -1 on failure.
5474 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5477 int cnt = 0, ret = -1;
5479 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5480 bist |= PCI_BIST_START;
5481 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5484 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5485 if (!(bist & PCI_BIST_START)) {
5486 *data = (bist & PCI_BIST_CODE_MASK);
5498 * s2io-link_test - verifies the link state of the nic
5499 * @sp ; private member of the device structure, which is a pointer to the
5500 * s2io_nic structure.
5501 * @data: variable that returns the result of each of the test conducted by
5504 * The function verifies the link state of the NIC and updates the input
5505 * argument 'data' appropriately.
5510 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5512 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5515 val64 = readq(&bar0->adapter_status);
5516 if(!(LINK_IS_UP(val64)))
5525 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5526 * @sp - private member of the device structure, which is a pointer to the
5527 * s2io_nic structure.
5528 * @data - variable that returns the result of each of the test
5529 * conducted by the driver.
5531 * This is one of the offline test that tests the read and write
5532 * access to the RldRam chip on the NIC.
5537 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5539 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5541 int cnt, iteration = 0, test_fail = 0;
5543 val64 = readq(&bar0->adapter_control);
5544 val64 &= ~ADAPTER_ECC_EN;
5545 writeq(val64, &bar0->adapter_control);
5547 val64 = readq(&bar0->mc_rldram_test_ctrl);
5548 val64 |= MC_RLDRAM_TEST_MODE;
5549 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5551 val64 = readq(&bar0->mc_rldram_mrs);
5552 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5553 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5555 val64 |= MC_RLDRAM_MRS_ENABLE;
5556 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5558 while (iteration < 2) {
5559 val64 = 0x55555555aaaa0000ULL;
5560 if (iteration == 1) {
5561 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5563 writeq(val64, &bar0->mc_rldram_test_d0);
5565 val64 = 0xaaaa5a5555550000ULL;
5566 if (iteration == 1) {
5567 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5569 writeq(val64, &bar0->mc_rldram_test_d1);
5571 val64 = 0x55aaaaaaaa5a0000ULL;
5572 if (iteration == 1) {
5573 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5575 writeq(val64, &bar0->mc_rldram_test_d2);
5577 val64 = (u64) (0x0000003ffffe0100ULL);
5578 writeq(val64, &bar0->mc_rldram_test_add);
5580 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5582 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5584 for (cnt = 0; cnt < 5; cnt++) {
5585 val64 = readq(&bar0->mc_rldram_test_ctrl);
5586 if (val64 & MC_RLDRAM_TEST_DONE)
5594 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5595 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5597 for (cnt = 0; cnt < 5; cnt++) {
5598 val64 = readq(&bar0->mc_rldram_test_ctrl);
5599 if (val64 & MC_RLDRAM_TEST_DONE)
5607 val64 = readq(&bar0->mc_rldram_test_ctrl);
5608 if (!(val64 & MC_RLDRAM_TEST_PASS))
5616 /* Bring the adapter out of test mode */
5617 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5623 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
5624 * @sp : private member of the device structure, which is a pointer to the
5625 * s2io_nic structure.
5626 * @ethtest : pointer to a ethtool command specific structure that will be
5627 * returned to the user.
5628 * @data : variable that returns the result of each of the test
5629 * conducted by the driver.
5631 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5632 * the health of the card.
5637 static void s2io_ethtool_test(struct net_device *dev,
5638 struct ethtool_test *ethtest,
5641 struct s2io_nic *sp = dev->priv;
5642 int orig_state = netif_running(sp->dev);
5644 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5645 /* Offline Tests. */
5647 s2io_close(sp->dev);
5649 if (s2io_register_test(sp, &data[0]))
5650 ethtest->flags |= ETH_TEST_FL_FAILED;
5654 if (s2io_rldram_test(sp, &data[3]))
5655 ethtest->flags |= ETH_TEST_FL_FAILED;
5659 if (s2io_eeprom_test(sp, &data[1]))
5660 ethtest->flags |= ETH_TEST_FL_FAILED;
5662 if (s2io_bist_test(sp, &data[4]))
5663 ethtest->flags |= ETH_TEST_FL_FAILED;
5673 "%s: is not up, cannot run test\n",
5682 if (s2io_link_test(sp, &data[2]))
5683 ethtest->flags |= ETH_TEST_FL_FAILED;
5692 static void s2io_get_ethtool_stats(struct net_device *dev,
5693 struct ethtool_stats *estats,
5697 struct s2io_nic *sp = dev->priv;
5698 struct stat_block *stat_info = sp->mac_control.stats_info;
5700 s2io_updt_stats(sp);
5702 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5703 le32_to_cpu(stat_info->tmac_frms);
5705 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5706 le32_to_cpu(stat_info->tmac_data_octets);
5707 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5709 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5710 le32_to_cpu(stat_info->tmac_mcst_frms);
5712 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5713 le32_to_cpu(stat_info->tmac_bcst_frms);
5714 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5716 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5717 le32_to_cpu(stat_info->tmac_ttl_octets);
5719 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5720 le32_to_cpu(stat_info->tmac_ucst_frms);
5722 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5723 le32_to_cpu(stat_info->tmac_nucst_frms);
5725 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5726 le32_to_cpu(stat_info->tmac_any_err_frms);
5727 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5728 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5730 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5731 le32_to_cpu(stat_info->tmac_vld_ip);
5733 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5734 le32_to_cpu(stat_info->tmac_drop_ip);
5736 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5737 le32_to_cpu(stat_info->tmac_icmp);
5739 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5740 le32_to_cpu(stat_info->tmac_rst_tcp);
5741 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5742 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5743 le32_to_cpu(stat_info->tmac_udp);
5745 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5746 le32_to_cpu(stat_info->rmac_vld_frms);
5748 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5749 le32_to_cpu(stat_info->rmac_data_octets);
5750 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5751 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5753 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5754 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5756 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5757 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5758 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5759 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5760 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5761 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5762 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5764 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5765 le32_to_cpu(stat_info->rmac_ttl_octets);
5767 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5768 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5770 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5771 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5773 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5774 le32_to_cpu(stat_info->rmac_discarded_frms);
5776 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5777 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5778 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5779 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5781 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5782 le32_to_cpu(stat_info->rmac_usized_frms);
5784 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5785 le32_to_cpu(stat_info->rmac_osized_frms);
5787 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5788 le32_to_cpu(stat_info->rmac_frag_frms);
5790 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5791 le32_to_cpu(stat_info->rmac_jabber_frms);
5792 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5793 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5794 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5795 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5796 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5797 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5799 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5800 le32_to_cpu(stat_info->rmac_ip);
5801 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5802 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5804 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5805 le32_to_cpu(stat_info->rmac_drop_ip);
5807 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5808 le32_to_cpu(stat_info->rmac_icmp);
5809 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5811 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5812 le32_to_cpu(stat_info->rmac_udp);
5814 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5815 le32_to_cpu(stat_info->rmac_err_drp_udp);
5816 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5817 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5818 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5819 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5820 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5821 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5822 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5823 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5824 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5825 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5826 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5827 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5828 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5829 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5830 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5831 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5832 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5834 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5835 le32_to_cpu(stat_info->rmac_pause_cnt);
5836 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5837 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5839 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5840 le32_to_cpu(stat_info->rmac_accepted_ip);
5841 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5842 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5843 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5844 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5845 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5846 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5847 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5848 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5849 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5850 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5851 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5852 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5853 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5854 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5855 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5856 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5857 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5858 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5859 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5861 /* Enhanced statistics exist only for Hercules */
5862 if(sp->device_type == XFRAME_II_DEVICE) {
5864 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5866 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5868 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5869 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5870 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5871 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5872 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5873 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5874 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5875 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5876 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5877 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5878 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5879 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5880 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5881 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5885 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5886 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5887 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5888 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5889 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5890 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5891 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5892 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5893 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5894 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5895 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5896 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5897 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5898 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5899 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5900 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5901 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5902 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5903 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5904 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5905 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5906 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5907 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5908 if (stat_info->sw_stat.num_aggregations) {
5909 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5912 * Since 64-bit divide does not work on all platforms,
5913 * do repeated subtraction.
5915 while (tmp >= stat_info->sw_stat.num_aggregations) {
5916 tmp -= stat_info->sw_stat.num_aggregations;
5919 tmp_stats[i++] = count;
5923 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5924 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5925 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5926 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
5927 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
5928 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
5929 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
5930 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
5932 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
5933 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
5934 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
5935 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
5936 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
5938 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
5939 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
5940 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
5941 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
5942 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
5943 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
5944 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
5945 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
5946 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
5949 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5951 return (XENA_REG_SPACE);
5955 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5957 struct s2io_nic *sp = dev->priv;
5959 return (sp->rx_csum);
5962 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5964 struct s2io_nic *sp = dev->priv;
5974 static int s2io_get_eeprom_len(struct net_device *dev)
5976 return (XENA_EEPROM_SPACE);
5979 static int s2io_ethtool_self_test_count(struct net_device *dev)
5981 return (S2IO_TEST_LEN);
5984 static void s2io_ethtool_get_strings(struct net_device *dev,
5985 u32 stringset, u8 * data)
5988 struct s2io_nic *sp = dev->priv;
5990 switch (stringset) {
5992 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5995 stat_size = sizeof(ethtool_xena_stats_keys);
5996 memcpy(data, ðtool_xena_stats_keys,stat_size);
5997 if(sp->device_type == XFRAME_II_DEVICE) {
5998 memcpy(data + stat_size,
5999 ðtool_enhanced_stats_keys,
6000 sizeof(ethtool_enhanced_stats_keys));
6001 stat_size += sizeof(ethtool_enhanced_stats_keys);
6004 memcpy(data + stat_size, ðtool_driver_stats_keys,
6005 sizeof(ethtool_driver_stats_keys));
6008 static int s2io_ethtool_get_stats_count(struct net_device *dev)
6010 struct s2io_nic *sp = dev->priv;
6012 switch(sp->device_type) {
6013 case XFRAME_I_DEVICE:
6014 stat_count = XFRAME_I_STAT_LEN;
6017 case XFRAME_II_DEVICE:
6018 stat_count = XFRAME_II_STAT_LEN;
6025 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6028 dev->features |= NETIF_F_IP_CSUM;
6030 dev->features &= ~NETIF_F_IP_CSUM;
6035 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6037 return (dev->features & NETIF_F_TSO) != 0;
6039 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6042 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6044 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6049 static const struct ethtool_ops netdev_ethtool_ops = {
6050 .get_settings = s2io_ethtool_gset,
6051 .set_settings = s2io_ethtool_sset,
6052 .get_drvinfo = s2io_ethtool_gdrvinfo,
6053 .get_regs_len = s2io_ethtool_get_regs_len,
6054 .get_regs = s2io_ethtool_gregs,
6055 .get_link = ethtool_op_get_link,
6056 .get_eeprom_len = s2io_get_eeprom_len,
6057 .get_eeprom = s2io_ethtool_geeprom,
6058 .set_eeprom = s2io_ethtool_seeprom,
6059 .get_ringparam = s2io_ethtool_gringparam,
6060 .get_pauseparam = s2io_ethtool_getpause_data,
6061 .set_pauseparam = s2io_ethtool_setpause_data,
6062 .get_rx_csum = s2io_ethtool_get_rx_csum,
6063 .set_rx_csum = s2io_ethtool_set_rx_csum,
6064 .get_tx_csum = ethtool_op_get_tx_csum,
6065 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6066 .get_sg = ethtool_op_get_sg,
6067 .set_sg = ethtool_op_set_sg,
6068 .get_tso = s2io_ethtool_op_get_tso,
6069 .set_tso = s2io_ethtool_op_set_tso,
6070 .get_ufo = ethtool_op_get_ufo,
6071 .set_ufo = ethtool_op_set_ufo,
6072 .self_test_count = s2io_ethtool_self_test_count,
6073 .self_test = s2io_ethtool_test,
6074 .get_strings = s2io_ethtool_get_strings,
6075 .phys_id = s2io_ethtool_idnic,
6076 .get_stats_count = s2io_ethtool_get_stats_count,
6077 .get_ethtool_stats = s2io_get_ethtool_stats
6081 * s2io_ioctl - Entry point for the Ioctl
6082 * @dev : Device pointer.
6083 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6084 * a proprietary structure used to pass information to the driver.
6085 * @cmd : This is used to distinguish between the different commands that
6086 * can be passed to the IOCTL functions.
6088 * Currently there are no special functionality supported in IOCTL, hence
6089 * function always return EOPNOTSUPPORTED
6092 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6098 * s2io_change_mtu - entry point to change MTU size for the device.
6099 * @dev : device pointer.
6100 * @new_mtu : the new MTU size for the device.
6101 * Description: A driver entry point to change MTU size for the device.
6102 * Before changing the MTU the device must be stopped.
6104 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6108 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6110 struct s2io_nic *sp = dev->priv;
6112 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6113 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6119 if (netif_running(dev)) {
6121 netif_stop_queue(dev);
6122 if (s2io_card_up(sp)) {
6123 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6126 if (netif_queue_stopped(dev))
6127 netif_wake_queue(dev);
6128 } else { /* Device is down */
6129 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6130 u64 val64 = new_mtu;
6132 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6139 * s2io_tasklet - Bottom half of the ISR.
6140 * @dev_adr : address of the device structure in dma_addr_t format.
6142 * This is the tasklet or the bottom half of the ISR. This is
6143 * an extension of the ISR which is scheduled by the scheduler to be run
6144 * when the load on the CPU is low. All low priority tasks of the ISR can
6145 * be pushed into the tasklet. For now the tasklet is used only to
6146 * replenish the Rx buffers in the Rx buffer descriptors.
6151 static void s2io_tasklet(unsigned long dev_addr)
6153 struct net_device *dev = (struct net_device *) dev_addr;
6154 struct s2io_nic *sp = dev->priv;
6156 struct mac_info *mac_control;
6157 struct config_param *config;
6159 mac_control = &sp->mac_control;
6160 config = &sp->config;
6162 if (!TASKLET_IN_USE) {
6163 for (i = 0; i < config->rx_ring_num; i++) {
6164 ret = fill_rx_buffers(sp, i);
6165 if (ret == -ENOMEM) {
6166 DBG_PRINT(INFO_DBG, "%s: Out of ",
6168 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6170 } else if (ret == -EFILL) {
6172 "%s: Rx Ring %d is full\n",
6177 clear_bit(0, (&sp->tasklet_status));
6182 * s2io_set_link - Set the LInk status
6183 * @data: long pointer to device private structue
6184 * Description: Sets the link status for the adapter
6187 static void s2io_set_link(struct work_struct *work)
6189 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6190 struct net_device *dev = nic->dev;
6191 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6197 if (!netif_running(dev))
6200 if (test_and_set_bit(0, &(nic->link_state))) {
6201 /* The card is being reset, no point doing anything */
6205 subid = nic->pdev->subsystem_device;
6206 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6208 * Allow a small delay for the NICs self initiated
6209 * cleanup to complete.
6214 val64 = readq(&bar0->adapter_status);
6215 if (LINK_IS_UP(val64)) {
6216 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6217 if (verify_xena_quiescence(nic)) {
6218 val64 = readq(&bar0->adapter_control);
6219 val64 |= ADAPTER_CNTL_EN;
6220 writeq(val64, &bar0->adapter_control);
6221 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6222 nic->device_type, subid)) {
6223 val64 = readq(&bar0->gpio_control);
6224 val64 |= GPIO_CTRL_GPIO_0;
6225 writeq(val64, &bar0->gpio_control);
6226 val64 = readq(&bar0->gpio_control);
6228 val64 |= ADAPTER_LED_ON;
6229 writeq(val64, &bar0->adapter_control);
6231 nic->device_enabled_once = TRUE;
6233 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6234 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6235 netif_stop_queue(dev);
6238 val64 = readq(&bar0->adapter_status);
6239 if (!LINK_IS_UP(val64)) {
6240 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6241 DBG_PRINT(ERR_DBG, " Link down after enabling ");
6242 DBG_PRINT(ERR_DBG, "device \n");
6244 s2io_link(nic, LINK_UP);
6246 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6248 val64 = readq(&bar0->gpio_control);
6249 val64 &= ~GPIO_CTRL_GPIO_0;
6250 writeq(val64, &bar0->gpio_control);
6251 val64 = readq(&bar0->gpio_control);
6253 s2io_link(nic, LINK_DOWN);
6255 clear_bit(0, &(nic->link_state));
6261 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6263 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6264 u64 *temp2, int size)
6266 struct net_device *dev = sp->dev;
6267 struct sk_buff *frag_list;
6269 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6272 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6274 * As Rx frame are not going to be processed,
6275 * using same mapped address for the Rxd
6278 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
6280 *skb = dev_alloc_skb(size);
6282 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6283 DBG_PRINT(INFO_DBG, "memory to allocate ");
6284 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6285 sp->mac_control.stats_info->sw_stat. \
6286 mem_alloc_fail_cnt++;
6289 sp->mac_control.stats_info->sw_stat.mem_allocated
6290 += (*skb)->truesize;
6291 /* storing the mapped addr in a temp variable
6292 * such it will be used for next rxd whose
6293 * Host Control is NULL
6295 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6296 pci_map_single( sp->pdev, (*skb)->data,
6297 size - NET_IP_ALIGN,
6298 PCI_DMA_FROMDEVICE);
6299 rxdp->Host_Control = (unsigned long) (*skb);
6301 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6302 /* Two buffer Mode */
6304 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6305 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6306 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6308 *skb = dev_alloc_skb(size);
6310 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6311 DBG_PRINT(INFO_DBG, "memory to allocate ");
6312 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6313 sp->mac_control.stats_info->sw_stat. \
6314 mem_alloc_fail_cnt++;
6317 sp->mac_control.stats_info->sw_stat.mem_allocated
6318 += (*skb)->truesize;
6319 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6320 pci_map_single(sp->pdev, (*skb)->data,
6322 PCI_DMA_FROMDEVICE);
6323 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6324 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6325 PCI_DMA_FROMDEVICE);
6326 rxdp->Host_Control = (unsigned long) (*skb);
6328 /* Buffer-1 will be dummy buffer not used */
6329 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6330 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6331 PCI_DMA_FROMDEVICE);
6333 } else if ((rxdp->Host_Control == 0)) {
6334 /* Three buffer mode */
6336 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6337 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6338 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6340 *skb = dev_alloc_skb(size);
6342 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6343 DBG_PRINT(INFO_DBG, "memory to allocate ");
6344 DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
6345 sp->mac_control.stats_info->sw_stat. \
6346 mem_alloc_fail_cnt++;
6349 sp->mac_control.stats_info->sw_stat.mem_allocated
6350 += (*skb)->truesize;
6351 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6352 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6353 PCI_DMA_FROMDEVICE);
6354 /* Buffer-1 receives L3/L4 headers */
6355 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6356 pci_map_single( sp->pdev, (*skb)->data,
6358 PCI_DMA_FROMDEVICE);
6360 * skb_shinfo(skb)->frag_list will have L4
6363 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6365 if (skb_shinfo(*skb)->frag_list == NULL) {
6366 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6367 failed\n ", dev->name);
6368 sp->mac_control.stats_info->sw_stat. \
6369 mem_alloc_fail_cnt++;
6372 frag_list = skb_shinfo(*skb)->frag_list;
6373 frag_list->next = NULL;
6374 sp->mac_control.stats_info->sw_stat.mem_allocated
6375 += frag_list->truesize;
6377 * Buffer-2 receives L4 data payload
6379 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6380 pci_map_single( sp->pdev, frag_list->data,
6381 dev->mtu, PCI_DMA_FROMDEVICE);
6386 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6389 struct net_device *dev = sp->dev;
6390 if (sp->rxd_mode == RXD_MODE_1) {
6391 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6392 } else if (sp->rxd_mode == RXD_MODE_3B) {
6393 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6394 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6395 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6397 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6398 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6399 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6403 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6405 int i, j, k, blk_cnt = 0, size;
6406 struct mac_info * mac_control = &sp->mac_control;
6407 struct config_param *config = &sp->config;
6408 struct net_device *dev = sp->dev;
6409 struct RxD_t *rxdp = NULL;
6410 struct sk_buff *skb = NULL;
6411 struct buffAdd *ba = NULL;
6412 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6414 /* Calculate the size based on ring mode */
6415 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6416 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6417 if (sp->rxd_mode == RXD_MODE_1)
6418 size += NET_IP_ALIGN;
6419 else if (sp->rxd_mode == RXD_MODE_3B)
6420 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6422 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6424 for (i = 0; i < config->rx_ring_num; i++) {
6425 blk_cnt = config->rx_cfg[i].num_rxd /
6426 (rxd_count[sp->rxd_mode] +1);
6428 for (j = 0; j < blk_cnt; j++) {
6429 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6430 rxdp = mac_control->rings[i].
6431 rx_blocks[j].rxds[k].virt_addr;
6432 if(sp->rxd_mode >= RXD_MODE_3A)
6433 ba = &mac_control->rings[i].ba[j][k];
6434 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6435 &skb,(u64 *)&temp0_64,
6442 set_rxd_buffer_size(sp, rxdp, size);
6444 /* flip the Ownership bit to Hardware */
6445 rxdp->Control_1 |= RXD_OWN_XENA;
6453 static int s2io_add_isr(struct s2io_nic * sp)
6456 struct net_device *dev = sp->dev;
6459 if (sp->intr_type == MSI)
6460 ret = s2io_enable_msi(sp);
6461 else if (sp->intr_type == MSI_X)
6462 ret = s2io_enable_msi_x(sp);
6464 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6465 sp->intr_type = INTA;
6468 /* Store the values of the MSIX table in the struct s2io_nic structure */
6469 store_xmsi_data(sp);
6471 /* After proper initialization of H/W, register ISR */
6472 if (sp->intr_type == MSI) {
6473 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6474 IRQF_SHARED, sp->name, dev);
6476 pci_disable_msi(sp->pdev);
6477 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6482 if (sp->intr_type == MSI_X) {
6483 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6485 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6486 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6487 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6489 err = request_irq(sp->entries[i].vector,
6490 s2io_msix_fifo_handle, 0, sp->desc[i],
6491 sp->s2io_entries[i].arg);
6492 /* If either data or addr is zero print it */
6493 if(!(sp->msix_info[i].addr &&
6494 sp->msix_info[i].data)) {
6495 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6496 "Data:0x%lx\n",sp->desc[i],
6497 (unsigned long long)
6498 sp->msix_info[i].addr,
6500 ntohl(sp->msix_info[i].data));
6505 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6507 err = request_irq(sp->entries[i].vector,
6508 s2io_msix_ring_handle, 0, sp->desc[i],
6509 sp->s2io_entries[i].arg);
6510 /* If either data or addr is zero print it */
6511 if(!(sp->msix_info[i].addr &&
6512 sp->msix_info[i].data)) {
6513 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6514 "Data:0x%lx\n",sp->desc[i],
6515 (unsigned long long)
6516 sp->msix_info[i].addr,
6518 ntohl(sp->msix_info[i].data));
6524 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6525 "failed\n", dev->name, i);
6526 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6529 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6531 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6532 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6534 if (sp->intr_type == INTA) {
6535 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6538 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6545 static void s2io_rem_isr(struct s2io_nic * sp)
6548 struct net_device *dev = sp->dev;
6550 if (sp->intr_type == MSI_X) {
6554 for (i=1; (sp->s2io_entries[i].in_use ==
6555 MSIX_REGISTERED_SUCCESS); i++) {
6556 int vector = sp->entries[i].vector;
6557 void *arg = sp->s2io_entries[i].arg;
6559 free_irq(vector, arg);
6561 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6562 msi_control &= 0xFFFE; /* Disable MSI */
6563 pci_write_config_word(sp->pdev, 0x42, msi_control);
6565 pci_disable_msix(sp->pdev);
6567 free_irq(sp->pdev->irq, dev);
6568 if (sp->intr_type == MSI) {
6571 pci_disable_msi(sp->pdev);
6572 pci_read_config_word(sp->pdev, 0x4c, &val);
6574 pci_write_config_word(sp->pdev, 0x4c, val);
6577 /* Waiting till all Interrupt handlers are complete */
6581 if (!atomic_read(&sp->isr_cnt))
6587 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6590 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6591 unsigned long flags;
6592 register u64 val64 = 0;
6594 del_timer_sync(&sp->alarm_timer);
6595 /* If s2io_set_link task is executing, wait till it completes. */
6596 while (test_and_set_bit(0, &(sp->link_state))) {
6599 atomic_set(&sp->card_state, CARD_DOWN);
6601 /* disable Tx and Rx traffic on the NIC */
6608 tasklet_kill(&sp->task);
6610 /* Check if the device is Quiescent and then Reset the NIC */
6612 /* As per the HW requirement we need to replenish the
6613 * receive buffer to avoid the ring bump. Since there is
6614 * no intention of processing the Rx frame at this pointwe are
6615 * just settting the ownership bit of rxd in Each Rx
6616 * ring to HW and set the appropriate buffer size
6617 * based on the ring mode
6619 rxd_owner_bit_reset(sp);
6621 val64 = readq(&bar0->adapter_status);
6622 if (verify_xena_quiescence(sp)) {
6623 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6631 "s2io_close:Device not Quiescent ");
6632 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6633 (unsigned long long) val64);
6640 spin_lock_irqsave(&sp->tx_lock, flags);
6641 /* Free all Tx buffers */
6642 free_tx_buffers(sp);
6643 spin_unlock_irqrestore(&sp->tx_lock, flags);
6645 /* Free all Rx buffers */
6646 spin_lock_irqsave(&sp->rx_lock, flags);
6647 free_rx_buffers(sp);
6648 spin_unlock_irqrestore(&sp->rx_lock, flags);
6650 clear_bit(0, &(sp->link_state));
6653 static void s2io_card_down(struct s2io_nic * sp)
6655 do_s2io_card_down(sp, 1);
6658 static int s2io_card_up(struct s2io_nic * sp)
6661 struct mac_info *mac_control;
6662 struct config_param *config;
6663 struct net_device *dev = (struct net_device *) sp->dev;
6666 /* Initialize the H/W I/O registers */
6667 if (init_nic(sp) != 0) {
6668 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6675 * Initializing the Rx buffers. For now we are considering only 1
6676 * Rx ring and initializing buffers into 30 Rx blocks
6678 mac_control = &sp->mac_control;
6679 config = &sp->config;
6681 for (i = 0; i < config->rx_ring_num; i++) {
6682 if ((ret = fill_rx_buffers(sp, i))) {
6683 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6686 free_rx_buffers(sp);
6689 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6690 atomic_read(&sp->rx_bufs_left[i]));
6692 /* Maintain the state prior to the open */
6693 if (sp->promisc_flg)
6694 sp->promisc_flg = 0;
6695 if (sp->m_cast_flg) {
6697 sp->all_multi_pos= 0;
6700 /* Setting its receive mode */
6701 s2io_set_multicast(dev);
6704 /* Initialize max aggregatable pkts per session based on MTU */
6705 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6706 /* Check if we can use(if specified) user provided value */
6707 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6708 sp->lro_max_aggr_per_sess = lro_max_pkts;
6711 /* Enable Rx Traffic and interrupts on the NIC */
6712 if (start_nic(sp)) {
6713 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6715 free_rx_buffers(sp);
6719 /* Add interrupt service routine */
6720 if (s2io_add_isr(sp) != 0) {
6721 if (sp->intr_type == MSI_X)
6724 free_rx_buffers(sp);
6728 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6730 /* Enable tasklet for the device */
6731 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6733 /* Enable select interrupts */
6734 if (sp->intr_type != INTA)
6735 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6737 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6738 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6739 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6740 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6744 atomic_set(&sp->card_state, CARD_UP);
6749 * s2io_restart_nic - Resets the NIC.
6750 * @data : long pointer to the device private structure
6752 * This function is scheduled to be run by the s2io_tx_watchdog
6753 * function after 0.5 secs to reset the NIC. The idea is to reduce
6754 * the run time of the watch dog routine which is run holding a
6758 static void s2io_restart_nic(struct work_struct *work)
6760 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6761 struct net_device *dev = sp->dev;
6765 if (!netif_running(dev))
6769 if (s2io_card_up(sp)) {
6770 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6773 netif_wake_queue(dev);
6774 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6781 * s2io_tx_watchdog - Watchdog for transmit side.
6782 * @dev : Pointer to net device structure
6784 * This function is triggered if the Tx Queue is stopped
6785 * for a pre-defined amount of time when the Interface is still up.
6786 * If the Interface is jammed in such a situation, the hardware is
6787 * reset (by s2io_close) and restarted again (by s2io_open) to
6788 * overcome any problem that might have been caused in the hardware.
6793 static void s2io_tx_watchdog(struct net_device *dev)
6795 struct s2io_nic *sp = dev->priv;
6797 if (netif_carrier_ok(dev)) {
6798 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
6799 schedule_work(&sp->rst_timer_task);
6800 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6805 * rx_osm_handler - To perform some OS related operations on SKB.
6806 * @sp: private member of the device structure,pointer to s2io_nic structure.
6807 * @skb : the socket buffer pointer.
6808 * @len : length of the packet
6809 * @cksum : FCS checksum of the frame.
6810 * @ring_no : the ring from which this RxD was extracted.
6812 * This function is called by the Rx interrupt serivce routine to perform
6813 * some OS related operations on the SKB before passing it to the upper
6814 * layers. It mainly checks if the checksum is OK, if so adds it to the
6815 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6816 * to the upper layer. If the checksum is wrong, it increments the Rx
6817 * packet error count, frees the SKB and returns error.
6819 * SUCCESS on success and -1 on failure.
6821 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6823 struct s2io_nic *sp = ring_data->nic;
6824 struct net_device *dev = (struct net_device *) sp->dev;
6825 struct sk_buff *skb = (struct sk_buff *)
6826 ((unsigned long) rxdp->Host_Control);
6827 int ring_no = ring_data->ring_no;
6828 u16 l3_csum, l4_csum;
6829 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6836 /* Check for parity error */
6838 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6840 err_mask = err >> 48;
6843 sp->mac_control.stats_info->sw_stat.
6844 rx_parity_err_cnt++;
6848 sp->mac_control.stats_info->sw_stat.
6853 sp->mac_control.stats_info->sw_stat.
6854 rx_parity_abort_cnt++;
6858 sp->mac_control.stats_info->sw_stat.
6863 sp->mac_control.stats_info->sw_stat.
6868 sp->mac_control.stats_info->sw_stat.
6873 sp->mac_control.stats_info->sw_stat.
6874 rx_buf_size_err_cnt++;
6878 sp->mac_control.stats_info->sw_stat.
6879 rx_rxd_corrupt_cnt++;
6883 sp->mac_control.stats_info->sw_stat.
6888 * Drop the packet if bad transfer code. Exception being
6889 * 0x5, which could be due to unsupported IPv6 extension header.
6890 * In this case, we let stack handle the packet.
6891 * Note that in this case, since checksum will be incorrect,
6892 * stack will validate the same.
6894 if (err_mask != 0x5) {
6895 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
6896 dev->name, err_mask);
6897 sp->stats.rx_crc_errors++;
6898 sp->mac_control.stats_info->sw_stat.mem_freed
6901 atomic_dec(&sp->rx_bufs_left[ring_no]);
6902 rxdp->Host_Control = 0;
6907 /* Updating statistics */
6908 rxdp->Host_Control = 0;
6909 if (sp->rxd_mode == RXD_MODE_1) {
6910 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6912 sp->stats.rx_bytes += len;
6915 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6916 int get_block = ring_data->rx_curr_get_info.block_index;
6917 int get_off = ring_data->rx_curr_get_info.offset;
6918 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6919 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6920 unsigned char *buff = skb_push(skb, buf0_len);
6922 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6923 sp->stats.rx_bytes += buf0_len + buf2_len;
6924 memcpy(buff, ba->ba_0, buf0_len);
6926 if (sp->rxd_mode == RXD_MODE_3A) {
6927 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6929 skb_put(skb, buf1_len);
6930 skb->len += buf2_len;
6931 skb->data_len += buf2_len;
6932 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6933 sp->stats.rx_bytes += buf1_len;
6936 skb_put(skb, buf2_len);
6939 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6940 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6942 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6943 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6944 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6946 * NIC verifies if the Checksum of the received
6947 * frame is Ok or not and accordingly returns
6948 * a flag in the RxD.
6950 skb->ip_summed = CHECKSUM_UNNECESSARY;
6956 ret = s2io_club_tcp_session(skb->data, &tcp,
6957 &tcp_len, &lro, rxdp, sp);
6959 case 3: /* Begin anew */
6962 case 1: /* Aggregate */
6964 lro_append_pkt(sp, lro,
6968 case 4: /* Flush session */
6970 lro_append_pkt(sp, lro,
6972 queue_rx_frame(lro->parent);
6973 clear_lro_session(lro);
6974 sp->mac_control.stats_info->
6975 sw_stat.flush_max_pkts++;
6978 case 2: /* Flush both */
6979 lro->parent->data_len =
6981 sp->mac_control.stats_info->
6982 sw_stat.sending_both++;
6983 queue_rx_frame(lro->parent);
6984 clear_lro_session(lro);
6986 case 0: /* sessions exceeded */
6987 case -1: /* non-TCP or not
6991 * First pkt in session not
6992 * L3/L4 aggregatable
6997 "%s: Samadhana!!\n",
7004 * Packet with erroneous checksum, let the
7005 * upper layers deal with it.
7007 skb->ip_summed = CHECKSUM_NONE;
7010 skb->ip_summed = CHECKSUM_NONE;
7012 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7014 skb->protocol = eth_type_trans(skb, dev);
7015 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7017 /* Queueing the vlan frame to the upper layer */
7019 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7020 RXD_GET_VLAN_TAG(rxdp->Control_2));
7022 vlan_hwaccel_rx(skb, sp->vlgrp,
7023 RXD_GET_VLAN_TAG(rxdp->Control_2));
7026 netif_receive_skb(skb);
7032 queue_rx_frame(skb);
7034 dev->last_rx = jiffies;
7036 atomic_dec(&sp->rx_bufs_left[ring_no]);
7041 * s2io_link - stops/starts the Tx queue.
7042 * @sp : private member of the device structure, which is a pointer to the
7043 * s2io_nic structure.
7044 * @link : inidicates whether link is UP/DOWN.
7046 * This function stops/starts the Tx queue depending on whether the link
7047 * status of the NIC is is down or up. This is called by the Alarm
7048 * interrupt handler whenever a link change interrupt comes up.
7053 static void s2io_link(struct s2io_nic * sp, int link)
7055 struct net_device *dev = (struct net_device *) sp->dev;
7057 if (link != sp->last_link_state) {
7058 if (link == LINK_DOWN) {
7059 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7060 netif_carrier_off(dev);
7061 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7062 sp->mac_control.stats_info->sw_stat.link_up_time =
7063 jiffies - sp->start_time;
7064 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7066 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7067 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7068 sp->mac_control.stats_info->sw_stat.link_down_time =
7069 jiffies - sp->start_time;
7070 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7071 netif_carrier_on(dev);
7074 sp->last_link_state = link;
7075 sp->start_time = jiffies;
7079 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7080 * @sp : private member of the device structure, which is a pointer to the
7081 * s2io_nic structure.
7083 * This function initializes a few of the PCI and PCI-X configuration registers
7084 * with recommended values.
7089 static void s2io_init_pci(struct s2io_nic * sp)
7091 u16 pci_cmd = 0, pcix_cmd = 0;
7093 /* Enable Data Parity Error Recovery in PCI-X command register. */
7094 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7096 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7098 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7101 /* Set the PErr Response bit in PCI command register. */
7102 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7103 pci_write_config_word(sp->pdev, PCI_COMMAND,
7104 (pci_cmd | PCI_COMMAND_PARITY));
7105 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7108 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7110 if ( tx_fifo_num > 8) {
7111 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7113 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7116 if ( rx_ring_num > 8) {
7117 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7119 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7122 if (*dev_intr_type != INTA)
7125 #ifndef CONFIG_PCI_MSI
7126 if (*dev_intr_type != INTA) {
7127 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
7128 "MSI/MSI-X. Defaulting to INTA\n");
7129 *dev_intr_type = INTA;
7132 if (*dev_intr_type > MSI_X) {
7133 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7134 "Defaulting to INTA\n");
7135 *dev_intr_type = INTA;
7138 if ((*dev_intr_type == MSI_X) &&
7139 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7140 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7141 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7142 "Defaulting to INTA\n");
7143 *dev_intr_type = INTA;
7146 if (rx_ring_mode > 3) {
7147 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7148 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
7155 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7156 * or Traffic class respectively.
7157 * @nic: device peivate variable
7158 * Description: The function configures the receive steering to
7159 * desired receive ring.
7160 * Return Value: SUCCESS on success and
7161 * '-1' on failure (endian settings incorrect).
7163 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7165 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7166 register u64 val64 = 0;
7168 if (ds_codepoint > 63)
7171 val64 = RTS_DS_MEM_DATA(ring);
7172 writeq(val64, &bar0->rts_ds_mem_data);
7174 val64 = RTS_DS_MEM_CTRL_WE |
7175 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7176 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7178 writeq(val64, &bar0->rts_ds_mem_ctrl);
7180 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7181 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7186 * s2io_init_nic - Initialization of the adapter .
7187 * @pdev : structure containing the PCI related information of the device.
7188 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7190 * The function initializes an adapter identified by the pci_dec structure.
7191 * All OS related initialization including memory and device structure and
7192 * initlaization of the device private variable is done. Also the swapper
7193 * control register is initialized to enable read and write into the I/O
7194 * registers of the device.
7196 * returns 0 on success and negative on failure.
7199 static int __devinit
7200 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7202 struct s2io_nic *sp;
7203 struct net_device *dev;
7205 int dma_flag = FALSE;
7206 u32 mac_up, mac_down;
7207 u64 val64 = 0, tmp64 = 0;
7208 struct XENA_dev_config __iomem *bar0 = NULL;
7210 struct mac_info *mac_control;
7211 struct config_param *config;
7213 u8 dev_intr_type = intr_type;
7215 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7218 if ((ret = pci_enable_device(pdev))) {
7220 "s2io_init_nic: pci_enable_device failed\n");
7224 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7225 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7227 if (pci_set_consistent_dma_mask
7228 (pdev, DMA_64BIT_MASK)) {
7230 "Unable to obtain 64bit DMA for \
7231 consistent allocations\n");
7232 pci_disable_device(pdev);
7235 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7236 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7238 pci_disable_device(pdev);
7241 if (dev_intr_type != MSI_X) {
7242 if (pci_request_regions(pdev, s2io_driver_name)) {
7243 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
7244 pci_disable_device(pdev);
7249 if (!(request_mem_region(pci_resource_start(pdev, 0),
7250 pci_resource_len(pdev, 0), s2io_driver_name))) {
7251 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
7252 pci_disable_device(pdev);
7255 if (!(request_mem_region(pci_resource_start(pdev, 2),
7256 pci_resource_len(pdev, 2), s2io_driver_name))) {
7257 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
7258 release_mem_region(pci_resource_start(pdev, 0),
7259 pci_resource_len(pdev, 0));
7260 pci_disable_device(pdev);
7265 dev = alloc_etherdev(sizeof(struct s2io_nic));
7267 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7268 pci_disable_device(pdev);
7269 pci_release_regions(pdev);
7273 pci_set_master(pdev);
7274 pci_set_drvdata(pdev, dev);
7275 SET_MODULE_OWNER(dev);
7276 SET_NETDEV_DEV(dev, &pdev->dev);
7278 /* Private member variable initialized to s2io NIC structure */
7280 memset(sp, 0, sizeof(struct s2io_nic));
7283 sp->high_dma_flag = dma_flag;
7284 sp->device_enabled_once = FALSE;
7285 if (rx_ring_mode == 1)
7286 sp->rxd_mode = RXD_MODE_1;
7287 if (rx_ring_mode == 2)
7288 sp->rxd_mode = RXD_MODE_3B;
7289 if (rx_ring_mode == 3)
7290 sp->rxd_mode = RXD_MODE_3A;
7292 sp->intr_type = dev_intr_type;
7294 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7295 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7296 sp->device_type = XFRAME_II_DEVICE;
7298 sp->device_type = XFRAME_I_DEVICE;
7302 /* Initialize some PCI/PCI-X fields of the NIC. */
7306 * Setting the device configuration parameters.
7307 * Most of these parameters can be specified by the user during
7308 * module insertion as they are module loadable parameters. If
7309 * these parameters are not not specified during load time, they
7310 * are initialized with default values.
7312 mac_control = &sp->mac_control;
7313 config = &sp->config;
7315 /* Tx side parameters. */
7316 config->tx_fifo_num = tx_fifo_num;
7317 for (i = 0; i < MAX_TX_FIFOS; i++) {
7318 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7319 config->tx_cfg[i].fifo_priority = i;
7322 /* mapping the QoS priority to the configured fifos */
7323 for (i = 0; i < MAX_TX_FIFOS; i++)
7324 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7326 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7327 for (i = 0; i < config->tx_fifo_num; i++) {
7328 config->tx_cfg[i].f_no_snoop =
7329 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7330 if (config->tx_cfg[i].fifo_len < 65) {
7331 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7335 /* + 2 because one Txd for skb->data and one Txd for UFO */
7336 config->max_txds = MAX_SKB_FRAGS + 2;
7338 /* Rx side parameters. */
7339 config->rx_ring_num = rx_ring_num;
7340 for (i = 0; i < MAX_RX_RINGS; i++) {
7341 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7342 (rxd_count[sp->rxd_mode] + 1);
7343 config->rx_cfg[i].ring_priority = i;
7346 for (i = 0; i < rx_ring_num; i++) {
7347 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7348 config->rx_cfg[i].f_no_snoop =
7349 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7352 /* Setting Mac Control parameters */
7353 mac_control->rmac_pause_time = rmac_pause_time;
7354 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7355 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7358 /* Initialize Ring buffer parameters. */
7359 for (i = 0; i < config->rx_ring_num; i++)
7360 atomic_set(&sp->rx_bufs_left[i], 0);
7362 /* Initialize the number of ISRs currently running */
7363 atomic_set(&sp->isr_cnt, 0);
7365 /* initialize the shared memory used by the NIC and the host */
7366 if (init_shared_mem(sp)) {
7367 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7370 goto mem_alloc_failed;
7373 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7374 pci_resource_len(pdev, 0));
7376 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7379 goto bar0_remap_failed;
7382 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7383 pci_resource_len(pdev, 2));
7385 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7388 goto bar1_remap_failed;
7391 dev->irq = pdev->irq;
7392 dev->base_addr = (unsigned long) sp->bar0;
7394 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7395 for (j = 0; j < MAX_TX_FIFOS; j++) {
7396 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7397 (sp->bar1 + (j * 0x00020000));
7400 /* Driver entry points */
7401 dev->open = &s2io_open;
7402 dev->stop = &s2io_close;
7403 dev->hard_start_xmit = &s2io_xmit;
7404 dev->get_stats = &s2io_get_stats;
7405 dev->set_multicast_list = &s2io_set_multicast;
7406 dev->do_ioctl = &s2io_ioctl;
7407 dev->change_mtu = &s2io_change_mtu;
7408 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7409 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7410 dev->vlan_rx_register = s2io_vlan_rx_register;
7413 * will use eth_mac_addr() for dev->set_mac_address
7414 * mac address will be set every time dev->open() is called
7416 dev->poll = s2io_poll;
7419 #ifdef CONFIG_NET_POLL_CONTROLLER
7420 dev->poll_controller = s2io_netpoll;
7423 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7424 if (sp->high_dma_flag == TRUE)
7425 dev->features |= NETIF_F_HIGHDMA;
7426 dev->features |= NETIF_F_TSO;
7427 dev->features |= NETIF_F_TSO6;
7428 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7429 dev->features |= NETIF_F_UFO;
7430 dev->features |= NETIF_F_HW_CSUM;
7433 dev->tx_timeout = &s2io_tx_watchdog;
7434 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7435 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7436 INIT_WORK(&sp->set_link_task, s2io_set_link);
7438 pci_save_state(sp->pdev);
7440 /* Setting swapper control on the NIC, for proper reset operation */
7441 if (s2io_set_swapper(sp)) {
7442 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7445 goto set_swap_failed;
7448 /* Verify if the Herc works on the slot its placed into */
7449 if (sp->device_type & XFRAME_II_DEVICE) {
7450 mode = s2io_verify_pci_mode(sp);
7452 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7453 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7455 goto set_swap_failed;
7459 /* Not needed for Herc */
7460 if (sp->device_type & XFRAME_I_DEVICE) {
7462 * Fix for all "FFs" MAC address problems observed on
7465 fix_mac_address(sp);
7470 * MAC address initialization.
7471 * For now only one mac address will be read and used.
7474 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7475 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7476 writeq(val64, &bar0->rmac_addr_cmd_mem);
7477 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7478 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7479 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7480 mac_down = (u32) tmp64;
7481 mac_up = (u32) (tmp64 >> 32);
7483 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7484 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7485 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7486 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7487 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7488 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7490 /* Set the factory defined MAC address initially */
7491 dev->addr_len = ETH_ALEN;
7492 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7494 /* reset Nic and bring it to known state */
7498 * Initialize the tasklet status and link state flags
7499 * and the card state parameter
7501 atomic_set(&(sp->card_state), 0);
7502 sp->tasklet_status = 0;
7505 /* Initialize spinlocks */
7506 spin_lock_init(&sp->tx_lock);
7509 spin_lock_init(&sp->put_lock);
7510 spin_lock_init(&sp->rx_lock);
7513 * SXE-002: Configure link and activity LED to init state
7516 subid = sp->pdev->subsystem_device;
7517 if ((subid & 0xFF) >= 0x07) {
7518 val64 = readq(&bar0->gpio_control);
7519 val64 |= 0x0000800000000000ULL;
7520 writeq(val64, &bar0->gpio_control);
7521 val64 = 0x0411040400000000ULL;
7522 writeq(val64, (void __iomem *) bar0 + 0x2700);
7523 val64 = readq(&bar0->gpio_control);
7526 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7528 if (register_netdev(dev)) {
7529 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7531 goto register_failed;
7534 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7535 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7536 sp->product_name, pdev->revision);
7537 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7538 s2io_driver_version);
7539 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7540 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7541 sp->def_mac_addr[0].mac_addr[0],
7542 sp->def_mac_addr[0].mac_addr[1],
7543 sp->def_mac_addr[0].mac_addr[2],
7544 sp->def_mac_addr[0].mac_addr[3],
7545 sp->def_mac_addr[0].mac_addr[4],
7546 sp->def_mac_addr[0].mac_addr[5]);
7547 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7548 if (sp->device_type & XFRAME_II_DEVICE) {
7549 mode = s2io_print_pci_mode(sp);
7551 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7553 unregister_netdev(dev);
7554 goto set_swap_failed;
7557 switch(sp->rxd_mode) {
7559 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7563 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7567 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7573 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7574 switch(sp->intr_type) {
7576 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7579 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7582 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7586 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7589 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7590 " enabled\n", dev->name);
7591 /* Initialize device name */
7592 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7594 /* Initialize bimodal Interrupts */
7595 sp->config.bimodal = bimodal;
7596 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7597 sp->config.bimodal = 0;
7598 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7603 * Make Link state as off at this point, when the Link change
7604 * interrupt comes the state will be automatically changed to
7607 netif_carrier_off(dev);
7618 free_shared_mem(sp);
7619 pci_disable_device(pdev);
7620 if (dev_intr_type != MSI_X)
7621 pci_release_regions(pdev);
7623 release_mem_region(pci_resource_start(pdev, 0),
7624 pci_resource_len(pdev, 0));
7625 release_mem_region(pci_resource_start(pdev, 2),
7626 pci_resource_len(pdev, 2));
7628 pci_set_drvdata(pdev, NULL);
7635 * s2io_rem_nic - Free the PCI device
7636 * @pdev: structure containing the PCI related information of the device.
7637 * Description: This function is called by the Pci subsystem to release a
7638 * PCI device and free up all resource held up by the device. This could
7639 * be in response to a Hot plug event or when the driver is to be removed
7643 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7645 struct net_device *dev =
7646 (struct net_device *) pci_get_drvdata(pdev);
7647 struct s2io_nic *sp;
7650 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7654 flush_scheduled_work();
7657 unregister_netdev(dev);
7659 free_shared_mem(sp);
7662 if (sp->intr_type != MSI_X)
7663 pci_release_regions(pdev);
7665 release_mem_region(pci_resource_start(pdev, 0),
7666 pci_resource_len(pdev, 0));
7667 release_mem_region(pci_resource_start(pdev, 2),
7668 pci_resource_len(pdev, 2));
7670 pci_set_drvdata(pdev, NULL);
7672 pci_disable_device(pdev);
7676 * s2io_starter - Entry point for the driver
7677 * Description: This function is the entry point for the driver. It verifies
7678 * the module loadable parameters and initializes PCI configuration space.
7681 int __init s2io_starter(void)
7683 return pci_register_driver(&s2io_driver);
7687 * s2io_closer - Cleanup routine for the driver
7688 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7691 static __exit void s2io_closer(void)
7693 pci_unregister_driver(&s2io_driver);
7694 DBG_PRINT(INIT_DBG, "cleanup done\n");
7697 module_init(s2io_starter);
7698 module_exit(s2io_closer);
7700 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7701 struct tcphdr **tcp, struct RxD_t *rxdp)
7704 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7706 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7707 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7713 * By default the VLAN field in the MAC is stripped by the card, if this
7714 * feature is turned off in rx_pa_cfg register, then the ip_off field
7715 * has to be shifted by a further 2 bytes
7718 case 0: /* DIX type */
7719 case 4: /* DIX type with VLAN */
7720 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7722 /* LLC, SNAP etc are considered non-mergeable */
7727 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7728 ip_len = (u8)((*ip)->ihl);
7730 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7735 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7738 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7739 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7740 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7745 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7747 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7750 static void initiate_new_session(struct lro *lro, u8 *l2h,
7751 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7753 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7757 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7758 lro->tcp_ack = ntohl(tcp->ack_seq);
7760 lro->total_len = ntohs(ip->tot_len);
7763 * check if we saw TCP timestamp. Other consistency checks have
7764 * already been done.
7766 if (tcp->doff == 8) {
7768 ptr = (u32 *)(tcp+1);
7770 lro->cur_tsval = *(ptr+1);
7771 lro->cur_tsecr = *(ptr+2);
7776 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7778 struct iphdr *ip = lro->iph;
7779 struct tcphdr *tcp = lro->tcph;
7781 struct stat_block *statinfo = sp->mac_control.stats_info;
7782 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7784 /* Update L3 header */
7785 ip->tot_len = htons(lro->total_len);
7787 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7790 /* Update L4 header */
7791 tcp->ack_seq = lro->tcp_ack;
7792 tcp->window = lro->window;
7794 /* Update tsecr field if this session has timestamps enabled */
7796 u32 *ptr = (u32 *)(tcp + 1);
7797 *(ptr+2) = lro->cur_tsecr;
7800 /* Update counters required for calculation of
7801 * average no. of packets aggregated.
7803 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7804 statinfo->sw_stat.num_aggregations++;
7807 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7808 struct tcphdr *tcp, u32 l4_pyld)
7810 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7811 lro->total_len += l4_pyld;
7812 lro->frags_len += l4_pyld;
7813 lro->tcp_next_seq += l4_pyld;
7816 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7817 lro->tcp_ack = tcp->ack_seq;
7818 lro->window = tcp->window;
7822 /* Update tsecr and tsval from this packet */
7823 ptr = (u32 *) (tcp + 1);
7824 lro->cur_tsval = *(ptr + 1);
7825 lro->cur_tsecr = *(ptr + 2);
7829 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7830 struct tcphdr *tcp, u32 tcp_pyld_len)
7834 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7836 if (!tcp_pyld_len) {
7837 /* Runt frame or a pure ack */
7841 if (ip->ihl != 5) /* IP has options */
7844 /* If we see CE codepoint in IP header, packet is not mergeable */
7845 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7848 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7849 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7850 tcp->ece || tcp->cwr || !tcp->ack) {
7852 * Currently recognize only the ack control word and
7853 * any other control field being set would result in
7854 * flushing the LRO session
7860 * Allow only one TCP timestamp option. Don't aggregate if
7861 * any other options are detected.
7863 if (tcp->doff != 5 && tcp->doff != 8)
7866 if (tcp->doff == 8) {
7867 ptr = (u8 *)(tcp + 1);
7868 while (*ptr == TCPOPT_NOP)
7870 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7873 /* Ensure timestamp value increases monotonically */
7875 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7878 /* timestamp echo reply should be non-zero */
7879 if (*((u32 *)(ptr+6)) == 0)
7887 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7888 struct RxD_t *rxdp, struct s2io_nic *sp)
7891 struct tcphdr *tcph;
7894 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7896 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7897 ip->saddr, ip->daddr);
7902 tcph = (struct tcphdr *)*tcp;
7903 *tcp_len = get_l4_pyld_length(ip, tcph);
7904 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7905 struct lro *l_lro = &sp->lro0_n[i];
7906 if (l_lro->in_use) {
7907 if (check_for_socket_match(l_lro, ip, tcph))
7909 /* Sock pair matched */
7912 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7913 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7914 "0x%x, actual 0x%x\n", __FUNCTION__,
7915 (*lro)->tcp_next_seq,
7918 sp->mac_control.stats_info->
7919 sw_stat.outof_sequence_pkts++;
7924 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7925 ret = 1; /* Aggregate */
7927 ret = 2; /* Flush both */
7933 /* Before searching for available LRO objects,
7934 * check if the pkt is L3/L4 aggregatable. If not
7935 * don't create new LRO session. Just send this
7938 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7942 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7943 struct lro *l_lro = &sp->lro0_n[i];
7944 if (!(l_lro->in_use)) {
7946 ret = 3; /* Begin anew */
7952 if (ret == 0) { /* sessions exceeded */
7953 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7961 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7964 update_L3L4_header(sp, *lro);
7967 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7968 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7969 update_L3L4_header(sp, *lro);
7970 ret = 4; /* Flush the LRO */
7974 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7982 static void clear_lro_session(struct lro *lro)
7984 static u16 lro_struct_size = sizeof(struct lro);
7986 memset(lro, 0, lro_struct_size);
7989 static void queue_rx_frame(struct sk_buff *skb)
7991 struct net_device *dev = skb->dev;
7993 skb->protocol = eth_type_trans(skb, dev);
7995 netif_receive_skb(skb);
8000 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8001 struct sk_buff *skb,
8004 struct sk_buff *first = lro->parent;
8006 first->len += tcp_len;
8007 first->data_len = lro->frags_len;
8008 skb_pull(skb, (skb->len - tcp_len));
8009 if (skb_shinfo(first)->frag_list)
8010 lro->last_frag->next = skb;
8012 skb_shinfo(first)->frag_list = skb;
8013 first->truesize += skb->truesize;
8014 lro->last_frag = skb;
8015 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8020 * s2io_io_error_detected - called when PCI error is detected
8021 * @pdev: Pointer to PCI device
8022 * @state: The current pci connection state
8024 * This function is called after a PCI bus error affecting
8025 * this device has been detected.
8027 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8028 pci_channel_state_t state)
8030 struct net_device *netdev = pci_get_drvdata(pdev);
8031 struct s2io_nic *sp = netdev->priv;
8033 netif_device_detach(netdev);
8035 if (netif_running(netdev)) {
8036 /* Bring down the card, while avoiding PCI I/O */
8037 do_s2io_card_down(sp, 0);
8039 pci_disable_device(pdev);
8041 return PCI_ERS_RESULT_NEED_RESET;
8045 * s2io_io_slot_reset - called after the pci bus has been reset.
8046 * @pdev: Pointer to PCI device
8048 * Restart the card from scratch, as if from a cold-boot.
8049 * At this point, the card has exprienced a hard reset,
8050 * followed by fixups by BIOS, and has its config space
8051 * set up identically to what it was at cold boot.
8053 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8055 struct net_device *netdev = pci_get_drvdata(pdev);
8056 struct s2io_nic *sp = netdev->priv;
8058 if (pci_enable_device(pdev)) {
8059 printk(KERN_ERR "s2io: "
8060 "Cannot re-enable PCI device after reset.\n");
8061 return PCI_ERS_RESULT_DISCONNECT;
8064 pci_set_master(pdev);
8067 return PCI_ERS_RESULT_RECOVERED;
8071 * s2io_io_resume - called when traffic can start flowing again.
8072 * @pdev: Pointer to PCI device
8074 * This callback is called when the error recovery driver tells
8075 * us that its OK to resume normal operation.
8077 static void s2io_io_resume(struct pci_dev *pdev)
8079 struct net_device *netdev = pci_get_drvdata(pdev);
8080 struct s2io_nic *sp = netdev->priv;
8082 if (netif_running(netdev)) {
8083 if (s2io_card_up(sp)) {
8084 printk(KERN_ERR "s2io: "
8085 "Can't bring device back up after reset.\n");
8089 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8091 printk(KERN_ERR "s2io: "
8092 "Can't resetore mac addr after reset.\n");
8097 netif_device_attach(netdev);
8098 netif_wake_queue(netdev);